// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resource that must outlive sendto has to be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case where two different Write segments send portions
 * of the same page, without the use of page reference counting.
 */
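
/* A sketch of the ordering guarantee described above, for a Reply
 * that happens to need two Write WRs (the WR counts are illustrative):
 *
 *	post Write WR 1, post Write WR 2, post Send WR
 *	    ...
 *	Write WR 1 completes: its rw_ctxt DMA-unmaps its pages
 *	Write WR 2 completes: its rw_ctxt DMA-unmaps its pages
 *	Send WR completes:    all earlier WRs are known to be done;
 *	                      only now are the Reply's pages released
 */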

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Remaining SGEs are DMA-unmapped, and pages left in sc_pages
 * are released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	might_sleep();

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		trace_svcrdma_post_send(wr);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_put(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}
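
/* A sketch of the send_ctxt life cycle the helpers above assume; the
 * real consumer is svc_rdma_sendto() below:
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!sctxt)
 *		... drop the connection ...
 *	... encode the transport header into sctxt->sc_xprt_buf,
 *	    then svc_rdma_sync_reply_hdr() and, if the RPC message
 *	    goes inline, svc_rdma_map_reply_msg() ...
 *	if (svc_rdma_send(rdma, &sctxt->sc_send_wr))
 *		svc_rdma_send_ctxt_put(rdma, sctxt);
 *
 * On success, ownership of sctxt passes to the Send completion
 * handler, which calls svc_rdma_send_ctxt_put().
 */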

static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}
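
/* For example, xdr_padsize(5) == 3 and xdr_padsize(8) == 0: XDR
 * rounds variable-length data up to the next 4-byte boundary.
 */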

/* Returns length of transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
	unsigned int nsegs;
	__be32 *p;

	p = rdma_resp;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p += rpcrdma_fixed_maxsz + 1;

	/* Skip Write list. */
	while (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	/* Skip Reply chunk. */
	if (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	return (unsigned long)p - (unsigned long)rdma_resp;
}
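
/* The layout being walked above, one XDR word (__be32) per field:
 *
 *	xid, vers, credits, proc	(rpcrdma_fixed_maxsz words)
 *	Read list:	xdr_zero	(never present in a Reply)
 *	Write list:	{ discrim, nsegs, nsegs * segment } ... xdr_zero
 *	Reply chunk:	xdr_zero, or { discrim, nsegs, nsegs * segment }
 *
 * where each segment is rpcrdma_segment_maxsz words: an RDMA handle,
 * a byte count, and an 8-byte RDMA offset.
 */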

/* One Write chunk is copied from the Call transport header to the
 * Reply transport header. Each segment's length field is updated to
 * reflect the number of bytes consumed in that segment.
 *
 * Returns the number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	u32 seg_len;

	/* Write list discriminator */
	*dst++ = *src++;

	/* number of segments in this chunk */
	nsegs = be32_to_cpup(src);
	*dst++ = *src++;

	for (i = nsegs; i; i--) {
		/* segment's RDMA handle */
		*dst++ = *src++;

		/* bytes returned in this segment */
		seg_len = be32_to_cpu(*src);
		if (remaining >= seg_len) {
			/* entire segment was consumed */
			*dst = *src;
			remaining -= seg_len;
		} else {
			/* segment only partly filled */
			*dst = cpu_to_be32(remaining);
			remaining = 0;
		}
		dst++; src++;

		/* segment's RDMA offset */
		*dst++ = *src++;
		*dst++ = *src++;
	}

	return nsegs;
}
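
/* For example, a Write chunk of two 8192-byte segments with
 * "remaining" == 10000 is encoded with the first segment's length
 * unchanged (8192, fully consumed) and the second reduced to 1808;
 * any further segments would be encoded with a length of zero.
 */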

/* The client provided a Write list in the Call message. Fill in
 * the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
					   unsigned int consumed)
{
	unsigned int nsegs;
	__be32 *p, *q;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	q = wr_ch;
	while (*q != xdr_zero) {
		nsegs = xdr_encode_write_chunk(p, q, consumed);
		q += 2 + nsegs * rpcrdma_segment_maxsz;
		p += 2 + nsegs * rpcrdma_segment_maxsz;
		consumed = 0;
	}

	/* Terminate Write list */
	*p++ = xdr_zero;

	/* Reply chunk discriminator; may be replaced later */
	*p = xdr_zero;
}

/* The client provided a Reply chunk in the Call message. Fill in
 * the segments in the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
					    unsigned int consumed)
{
	__be32 *p;

	/* Find the Reply chunk in the Reply's xprt header.
	 * RPC-over-RDMA V1 replies never have a Read list.
	 */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	/* Skip past Write list */
	while (*p++ != xdr_zero)
		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

	xdr_encode_write_chunk(p, rp_ch, consumed);
}

/* Parse the RPC Call's transport header.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
				      __be32 **write, __be32 **reply)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = p;
	else
		*reply = NULL;
}
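
/* In the walk above, each Read list entry is five XDR words: a
 * position word plus a 4-word segment. Each Write chunk is a segment
 * count followed by that many 4-word segments, hence the
 * 1 + nsegs * 4 stride.
 */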

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header
 *
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *ctxt,
			     unsigned int len)
{
	ctxt->sc_sges[0].length = len;
	ctxt->sc_send_wr.num_sge++;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr, len,
				      DMA_TO_DEVICE);
}

/* If the xdr_buf has more elements than the device can
 * transmit in a single RDMA Send, then the reply will
 * have to be copied into a bounce buffer.
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct xdr_buf *xdr,
				    __be32 *wr_lst)
{
	int elements;

	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!wr_lst) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}
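
/* For example, with 4KB pages and sc_max_send_sges == 4: a reply with
 * no Write list, a head, 9000 bytes of page data starting at
 * page_base 0, and a non-empty tail counts 1 + 3 + 1 = 5 elements.
 * With the transport header needing an SGE of its own, that exceeds
 * the device's limit, so the reply is pulled up.
 */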

/* The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header
 * buffer.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *ctxt,
				      struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = ctxt->sc_xprt_buf;
	dst += ctxt->sc_sges[0].length;

	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (wr_lst) {
		u32 xdrpad;

		xdrpad = xdr_padsize(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	ctxt->sc_sges[0].length += xdr->len;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr,
				      ctxt->sc_sges[0].length,
				      DMA_TO_DEVICE);

	return 0;
}

/**
 * svc_rdma_map_reply_msg - Map the buffer holding RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing RPC message
 * @wr_lst: pointer to Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *ctxt,
			   struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);

	++ctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, ctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (wr_lst) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_padsize(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}
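
/* A sketch of the resulting SGE array when a Write chunk is present
 * and no pull-up was needed:
 *
 *	sc_sges[0]: transport header (sc_xprt_buf)
 *	sc_sges[1]: xdr->head
 *	sc_sges[2]: xdr->tail, minus any XDR pad for the page data
 *
 * Without a Write chunk, one SGE per page of xdr->pages sits between
 * the head and the tail.
 */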

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp,
				   __be32 *wr_lst, __be32 *rp_ch)
{
	int ret;

	if (!rp_ch) {
		ret = svc_rdma_map_reply_msg(rdma, sctxt,
					     &rqstp->rq_res, wr_lst);
		if (ret < 0)
			return ret;
	}

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
		sctxt->sc_send_wr.num_sge);
	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	__be32 *p;
	int ret;

	p = ctxt->sc_xprt_buf;
	trace_svcrdma_err_chunk(*p);
	p += 3;
	*p++ = rdma_error;
	*p   = err_chunk;
	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
	if (ret) {
		svc_rdma_send_ctxt_put(rdma, ctxt);
		return ret;
	}

	return 0;
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	rdma_argp = rctxt->rc_recv_buf;
	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;
	rdma_resp = sctxt->sc_xprt_buf;

	p = rdma_resp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ch ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p   = xdr_zero;

	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
				      wr_lst, rp_ch);
	if (ret < 0)
		goto err1;
	return 0;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}
933