// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
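
/* A rough sketch of the transmit path implemented below (the Write
 * chunk helpers are implemented elsewhere in this transport):
 *
 *   svc_rdma_sendto()
 *      svc_rdma_send_ctxt_get()       - reserve a Send context
 *      svc_rdma_send_write_chunk()    - post Write WRs for the Write list
 *      svc_rdma_send_reply_chunk()    - post Write WRs for the Reply chunk
 *      svc_rdma_sync_reply_hdr()      - DMA sync the transport header
 *      svc_rdma_send_reply_msg()
 *         svc_rdma_map_reply_msg()    - DMA map (or pull up) the xdr_buf
 *         svc_rdma_send()             - post the Send WR
 *
 *   svc_rdma_wc_send()                - Send completion
 *      svc_rdma_send_ctxt_put()       - unmap SGEs, release pages
 */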

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

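/* Allocate a Send context: a svc_rdma_send_ctxt with a trailing SGE
 * array sized to sc_max_send_sges, plus a DMA-mapped buffer that holds
 * the RPC-over-RDMA transport header (and the whole Reply when pull-up
 * is needed). Returns NULL if allocation or DMA mapping fails.
 */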
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	might_sleep();

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		trace_svcrdma_post_send(wr);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_put(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

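/* Number of pad bytes needed to round @len up to the next XDR (4-byte)
 * boundary: for example, xdr_padsize(5) == 3 and xdr_padsize(8) == 0.
 */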
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

/* Returns length of transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
	unsigned int nsegs;
	__be32 *p;

	p = rdma_resp;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p += rpcrdma_fixed_maxsz + 1;

	/* Skip Write list. */
	while (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	/* Skip Reply chunk. */
	if (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	return (unsigned long)p - (unsigned long)rdma_resp;
}

/* One Write chunk is copied from Call transport header to Reply
 * transport header. Each segment's length field is updated to
 * reflect number of bytes consumed in the segment.
 *
 * Returns number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	u32 seg_len;

	/* Write list discriminator */
	*dst++ = *src++;

	/* number of segments in this chunk */
	nsegs = be32_to_cpup(src);
	*dst++ = *src++;

	for (i = nsegs; i; i--) {
		/* segment's RDMA handle */
		*dst++ = *src++;

		/* bytes returned in this segment */
		seg_len = be32_to_cpu(*src);
		if (remaining >= seg_len) {
			/* entire segment was consumed */
			*dst = *src;
			remaining -= seg_len;
		} else {
			/* segment only partly filled */
			*dst = cpu_to_be32(remaining);
			remaining = 0;
		}
		dst++; src++;

		/* segment's RDMA offset */
		*dst++ = *src++;
		*dst++ = *src++;
	}

	return nsegs;
}

/* The client provided a Write list in the Call message. Fill in
 * the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
					   unsigned int consumed)
{
	unsigned int nsegs;
	__be32 *p, *q;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	q = wr_ch;
	while (*q != xdr_zero) {
		nsegs = xdr_encode_write_chunk(p, q, consumed);
		q += 2 + nsegs * rpcrdma_segment_maxsz;
		p += 2 + nsegs * rpcrdma_segment_maxsz;
		consumed = 0;
	}

	/* Terminate Write list */
	*p++ = xdr_zero;

	/* Reply chunk discriminator; may be replaced later */
	*p = xdr_zero;
}

/* The client provided a Reply chunk in the Call message. Fill in
 * the segments in the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
					    unsigned int consumed)
{
	__be32 *p;

	/* Find the Reply chunk in the Reply's xprt header.
	 * RPC-over-RDMA V1 replies never have a Read list.
	 */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	/* Skip past Write list */
	while (*p++ != xdr_zero)
		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

	xdr_encode_write_chunk(p, rp_ch, consumed);
}

/* Parse the RPC Call's transport header.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
				      __be32 **write, __be32 **reply)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = p;
	else
		*reply = NULL;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply. Picks the
 * first R_key it finds in the chunk lists.
 *
 * Returns zero if RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(__be32 *rdma_argp,
				 __be32 *wr_lst, __be32 *rp_ch)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		p += 2;
	else if (wr_lst && be32_to_cpup(wr_lst + 1))
		p = wr_lst + 2;
	else if (rp_ch && be32_to_cpup(rp_ch + 1))
		p = rp_ch + 2;
	else
		return 0;
	return be32_to_cpup(p);
}

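/* DMA map part of a page for transmission via RDMA Send, recording the
 * mapping in ctxt->sc_sges[ctxt->sc_cur_sge_no]. The mapping is undone
 * later by svc_rdma_send_ctxt_put(). Returns zero, or -EIO if the
 * mapping fails.
 */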
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles the DMA-unmap, and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header
 *
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *ctxt,
			     unsigned int len)
{
	ctxt->sc_sges[0].length = len;
	ctxt->sc_send_wr.num_sge++;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr, len,
				      DMA_TO_DEVICE);
}

/* If the xdr_buf has more elements than the device can
 * transmit in a single RDMA Send, then the reply will
 * have to be copied into a bounce buffer.
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct xdr_buf *xdr,
				    __be32 *wr_lst)
{
	int elements;

	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!wr_lst) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}

/* The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header
 * buffer.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *ctxt,
				      struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = ctxt->sc_xprt_buf;
	dst += ctxt->sc_sges[0].length;

	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (wr_lst) {
		u32 xdrpad;

		xdrpad = xdr_padsize(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	ctxt->sc_sges[0].length += xdr->len;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr,
				      ctxt->sc_sges[0].length,
				      DMA_TO_DEVICE);

	return 0;
}

/**
 * svc_rdma_map_reply_msg - Map the buffer holding RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing RPC message
 * @wr_lst: pointer to Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *ctxt,
			   struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);

	++ctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, ctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (wr_lst) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_padsize(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the ctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   __be32 *rdma_argp,
				   struct svc_rqst *rqstp,
				   __be32 *wr_lst, __be32 *rp_ch)
{
	int ret;

	if (!rp_ch) {
		ret = svc_rdma_map_reply_msg(rdma, ctxt,
					     &rqstp->rq_res, wr_lst);
		if (ret < 0)
			return ret;
	}

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	if (rdma->sc_snd_w_inv) {
		ctxt->sc_send_wr.ex.invalidate_rkey =
			svc_rdma_get_inv_rkey(rdma_argp, wr_lst, rp_ch);
		if (ctxt->sc_send_wr.ex.invalidate_rkey)
			ctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
	}
	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
		ctxt->sc_send_wr.num_sge);
	return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	__be32 *p;
	int ret;

	p = ctxt->sc_xprt_buf;
	trace_svcrdma_err_chunk(*p);
	p += 3;
	*p++ = rdma_error;
	*p = err_chunk;
	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
	if (ret) {
		svc_rdma_send_ctxt_put(rdma, ctxt);
		return ret;
	}

	return 0;
}

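/* svc_rdma_prep_reply_hdr is intentionally empty: the RPC-over-RDMA
 * transport header is constructed in svc_rdma_sendto() below, so there
 * is nothing for the generic server code to prepare here.
 */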
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	rdma_argp = rctxt->rc_recv_buf;
	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;
	rdma_resp = sctxt->sc_xprt_buf;

	p = rdma_resp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ch ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rdma_argp, rqstp,
				      wr_lst, rp_ch);
	if (ret < 0)
		goto err1;
	return 0;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}