1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014-2017 Oracle.  All rights reserved.
4  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the BSD-type
10  * license below:
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  *
16  *      Redistributions of source code must retain the above copyright
17  *      notice, this list of conditions and the following disclaimer.
18  *
19  *      Redistributions in binary form must reproduce the above
20  *      copyright notice, this list of conditions and the following
21  *      disclaimer in the documentation and/or other materials provided
22  *      with the distribution.
23  *
24  *      Neither the name of the Network Appliance, Inc. nor the names of
25  *      its contributors may be used to endorse or promote products
26  *      derived from this software without specific prior written
27  *      permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 /*
43  * rpc_rdma.c
44  *
45  * This file contains the guts of the RPC RDMA protocol. It handles
46  * marshaling and unmarshaling, and it is where the interface to the
47  * Linux RPC framework lives.
48  */
49 
50 #include <linux/highmem.h>
51 
52 #include <linux/sunrpc/svc_rdma.h>
53 
54 #include "xprt_rdma.h"
55 #include <trace/events/rpcrdma.h>
56 
57 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
58 # define RPCDBG_FACILITY	RPCDBG_TRANS
59 #endif
60 
61 /* Returns size of largest RPC-over-RDMA header in a Call message
62  *
63  * The largest Call header contains a full-size Read list and a
64  * minimal Reply chunk.
65  */
66 static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
67 {
68 	unsigned int size;
69 
70 	/* Fixed header fields and list discriminators */
71 	size = RPCRDMA_HDRLEN_MIN;
72 
73 	/* Maximum Read list size */
74 	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
75 
76 	/* Minimal Reply chunk size */
77 	size += sizeof(__be32);	/* segment count */
78 	size += rpcrdma_segment_maxsz * sizeof(__be32);
79 	size += sizeof(__be32);	/* list discriminator */
80 
81 	dprintk("RPC:       %s: max call header size = %u\n",
82 		__func__, size);
83 	return size;
84 }
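
/* Note (added for clarity): the sum above is a worst-case bound on the
 * transport header of a Call carrying a full Read list (maxsegs
 * segments) plus a minimal one-segment Reply chunk, matching the shape
 * described in the comment preceding this function. It is computed up
 * front by rpcrdma_set_max_header_sizes() below, not on every call.
 */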
85 
86 /* Returns size of largest RPC-over-RDMA header in a Reply message
87  *
88  * There is only one Write list or one Reply chunk per Reply
89  * message.  The larger list is the Write list.
90  */
91 static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
92 {
93 	unsigned int size;
94 
95 	/* Fixed header fields and list discriminators */
96 	size = RPCRDMA_HDRLEN_MIN;
97 
98 	/* Maximum Write list size */
99 	size += sizeof(__be32);		/* segment count */
100 	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
101 	size += sizeof(__be32);	/* list discriminator */
102 
103 	dprintk("RPC:       %s: max reply header size = %u\n",
104 		__func__, size);
105 	return size;
106 }
107 
108 /**
109  * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
110  * @r_xprt: transport instance to initialize
111  *
112  * The max_inline fields contain the largest RPC message that can be
113  * sent or received inline, so the marshaling code doesn't have to
114  * repeat this calculation for every RPC.
115  */
116 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
117 {
118 	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
119 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
120 
121 	ep->rep_max_inline_send =
122 		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
123 	ep->rep_max_inline_recv =
124 		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
125 }
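
/* Illustrative sketch with hypothetical numbers: if the negotiated
 * inline threshold (rep_inline_send) is 4096 bytes and the worst-case
 * Call header for this device's segment count is 132 bytes, then
 * rep_max_inline_send becomes 3964 bytes. Any Call whose rq_snd_buf
 * length exceeds that value is steered to a Read chunk by
 * rpcrdma_args_inline() below.
 */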
126 
127 /* The client can send a request inline as long as the RPCRDMA header
128  * plus the RPC call fit under the transport's inline limit. If the
129  * combined call message size exceeds that limit, the client must use
130  * a Read chunk for this operation.
131  *
132  * A Read chunk is also required if sending the RPC call inline would
133  * exceed this device's max_sge limit.
134  */
135 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
136 				struct rpc_rqst *rqst)
137 {
138 	struct xdr_buf *xdr = &rqst->rq_snd_buf;
139 	unsigned int count, remaining, offset;
140 
141 	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
142 		return false;
143 
144 	if (xdr->page_len) {
145 		remaining = xdr->page_len;
146 		offset = offset_in_page(xdr->page_base);
147 		count = RPCRDMA_MIN_SEND_SGES;
148 		while (remaining) {
149 			remaining -= min_t(unsigned int,
150 					   PAGE_SIZE - offset, remaining);
151 			offset = 0;
152 			if (++count > r_xprt->rx_ia.ri_max_send_sges)
153 				return false;
154 		}
155 	}
156 
157 	return true;
158 }
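
/* Worked example of the SGE accounting above (assuming 4KB pages):
 * a 9000-byte page list that starts 100 bytes into its first page
 * spans three pages, so the loop counts RPCRDMA_MIN_SEND_SGES plus
 * three SGEs. As soon as the running count exceeds ri_max_send_sges,
 * the Call is sent with a Read chunk instead of inline.
 */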
159 
160 /* The client can't know how large the actual reply will be. Thus it
161  * plans for the largest possible reply for that particular ULP
162  * operation. If the maximum combined reply message size exceeds the
163  * inline threshold, the client must provide a Write list or a Reply
164  * chunk for this request.
165  */
166 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
167 				   struct rpc_rqst *rqst)
168 {
169 	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
170 }
171 
172 /* The client is required to provide a Reply chunk if the maximum
173  * size of the non-payload part of the RPC Reply is larger than
174  * the inline threshold.
175  */
176 static bool
177 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
178 			  const struct rpc_rqst *rqst)
179 {
180 	const struct xdr_buf *buf = &rqst->rq_rcv_buf;
181 
182 	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
183 		r_xprt->rx_ep.rep_max_inline_recv;
184 }
185 
186 /* Some ULP requests (NFS ACL operations, for example) are lazy about
187  * allocating receive pages. For TCP, these pages can be allocated
188  * during receive processing. That is not true for RDMA, which must
189  * always provision receive buffers up front.
190  */
191 static noinline int
192 rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
193 {
194 	struct page **ppages;
195 	int len;
196 
197 	len = buf->page_len;
198 	ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
199 	while (len > 0) {
200 		if (!*ppages)
201 			*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
202 		if (!*ppages)
203 			return -ENOBUFS;
204 		ppages++;
205 		len -= PAGE_SIZE;
206 	}
207 
208 	return 0;
209 }
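
/* Note (added for clarity): GFP_NOWAIT | __GFP_NOWARN keeps this path
 * non-blocking. If a page cannot be allocated immediately, the
 * -ENOBUFS return propagates out of rpcrdma_marshal_req(), which tells
 * the caller to try the RPC again after a delay rather than sleep here.
 */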
210 
211 /* Split @vec on page boundaries into SGEs. FMR registers pages, not
212  * a byte range. Other modes coalesce these SGEs into a single MR
213  * when they can.
214  *
215  * Returns pointer to next available SGE, and bumps the total number
216  * of SGEs consumed.
217  */
218 static struct rpcrdma_mr_seg *
219 rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
220 		     unsigned int *n)
221 {
222 	u32 remaining, page_offset;
223 	char *base;
224 
225 	base = vec->iov_base;
226 	page_offset = offset_in_page(base);
227 	remaining = vec->iov_len;
228 	while (remaining) {
229 		seg->mr_page = NULL;
230 		seg->mr_offset = base;
231 		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
232 		remaining -= seg->mr_len;
233 		base += seg->mr_len;
234 		++seg;
235 		++(*n);
236 		page_offset = 0;
237 	}
238 	return seg;
239 }
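
/* Note (added for clarity): segments built here carry mr_page == NULL
 * and a kernel virtual address in mr_offset, whereas page-list
 * segments built in rpcrdma_convert_iovs() below carry a struct page
 * pointer and a page offset, so the registration code can tell the
 * two kinds apart.
 */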
240 
241 /* Convert @xdrbuf into SGEs no larger than a page each. As they
242  * are registered, these SGEs are then coalesced into RDMA segments
243  * when the selected memreg mode supports it.
244  *
245  * Returns positive number of SGEs consumed, or a negative errno.
246  */
247 
248 static int
249 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
250 		     unsigned int pos, enum rpcrdma_chunktype type,
251 		     struct rpcrdma_mr_seg *seg)
252 {
253 	unsigned long page_base;
254 	unsigned int len, n;
255 	struct page **ppages;
256 
257 	n = 0;
258 	if (pos == 0)
259 		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);
260 
261 	len = xdrbuf->page_len;
262 	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
263 	page_base = offset_in_page(xdrbuf->page_base);
264 	while (len) {
265 		seg->mr_page = *ppages;
266 		seg->mr_offset = (char *)page_base;
267 		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
268 		len -= seg->mr_len;
269 		++ppages;
270 		++seg;
271 		++n;
272 		page_base = 0;
273 	}
274 
275 	/* When encoding a Read chunk, the tail iovec contains an
276 	 * XDR pad and may be omitted.
277 	 */
278 	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
279 		goto out;
280 
281 	/* When encoding a Write chunk, some servers need to see an
282 	 * extra segment for non-XDR-aligned Write chunks. The upper
283 	 * layer provides space in the tail iovec that may be used
284 	 * for this purpose.
285 	 */
286 	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
287 		goto out;
288 
289 	if (xdrbuf->tail[0].iov_len)
290 		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);
291 
292 out:
293 	if (unlikely(n > RPCRDMA_MAX_SEGS))
294 		return -EIO;
295 	return n;
296 }
297 
298 static inline int
299 encode_item_present(struct xdr_stream *xdr)
300 {
301 	__be32 *p;
302 
303 	p = xdr_reserve_space(xdr, sizeof(*p));
304 	if (unlikely(!p))
305 		return -EMSGSIZE;
306 
307 	*p = xdr_one;
308 	return 0;
309 }
310 
311 static inline int
312 encode_item_not_present(struct xdr_stream *xdr)
313 {
314 	__be32 *p;
315 
316 	p = xdr_reserve_space(xdr, sizeof(*p));
317 	if (unlikely(!p))
318 		return -EMSGSIZE;
319 
320 	*p = xdr_zero;
321 	return 0;
322 }
323 
324 static void
325 xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
326 {
327 	*iptr++ = cpu_to_be32(mr->mr_handle);
328 	*iptr++ = cpu_to_be32(mr->mr_length);
329 	xdr_encode_hyper(iptr, mr->mr_offset);
330 }
331 
332 static int
333 encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
334 {
335 	__be32 *p;
336 
337 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
338 	if (unlikely(!p))
339 		return -EMSGSIZE;
340 
341 	xdr_encode_rdma_segment(p, mr);
342 	return 0;
343 }
344 
345 static int
346 encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
347 		    u32 position)
348 {
349 	__be32 *p;
350 
351 	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
352 	if (unlikely(!p))
353 		return -EMSGSIZE;
354 
355 	*p++ = xdr_one;			/* Item present */
356 	*p++ = cpu_to_be32(position);
357 	xdr_encode_rdma_segment(p, mr);
358 	return 0;
359 }
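
/* On-the-wire layout produced by the encoders above, in XDR words
 * (descriptive sketch only):
 *
 *   plain segment: handle | length | offset-hi | offset-lo        (HLOO)
 *   read segment:  1 | position | handle | length | offset-hi | offset-lo
 *
 * matching the HLOO/PHLOO notation used in the chunk-encoder comments
 * below.
 */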
360 
361 static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
362 						 struct rpcrdma_req *req,
363 						 struct rpcrdma_mr_seg *seg,
364 						 int nsegs, bool writing,
365 						 struct rpcrdma_mr **mr)
366 {
367 	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
368 	if (!*mr) {
369 		*mr = rpcrdma_mr_get(r_xprt);
370 		if (!*mr)
371 			goto out_getmr_err;
372 		trace_xprtrdma_mr_get(req);
373 		(*mr)->mr_req = req;
374 	}
375 
376 	rpcrdma_mr_push(*mr, &req->rl_registered);
377 	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
378 
379 out_getmr_err:
380 	trace_xprtrdma_nomrs(req);
381 	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
382 	if (r_xprt->rx_ep.rep_connected != -ENODEV)
383 		schedule_work(&r_xprt->rx_buf.rb_refresh_worker);
384 	return ERR_PTR(-EAGAIN);
385 }
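
/* Note (added for clarity): when no MR can be obtained, the helper
 * above asks the RPC layer to wait for buffer space and, unless the
 * device has been removed (rep_connected == -ENODEV), kicks
 * rb_refresh_worker to replenish the MR pool. The ERR_PTR(-EAGAIN)
 * propagates up through the chunk encoders so rpcrdma_marshal_req()
 * returns -EAGAIN and the call is retried with the same arguments.
 */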
386 
387 /* Register and XDR encode the Read list. Supports encoding a list of read
388  * segments that belong to a single read chunk.
389  *
390  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
391  *
392  *  Read chunklist (a linked list):
393  *   N elements, position P (same P for all chunks of same arg!):
394  *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
395  *
396  * Returns zero on success, or a negative errno if a failure occurred.
397  * @xdr is advanced to the next position in the stream.
398  *
399  * Only a single @pos value is currently supported.
400  */
401 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
402 				    struct rpcrdma_req *req,
403 				    struct rpc_rqst *rqst,
404 				    enum rpcrdma_chunktype rtype)
405 {
406 	struct xdr_stream *xdr = &req->rl_stream;
407 	struct rpcrdma_mr_seg *seg;
408 	struct rpcrdma_mr *mr;
409 	unsigned int pos;
410 	int nsegs;
411 
412 	if (rtype == rpcrdma_noch)
413 		goto done;
414 
415 	pos = rqst->rq_snd_buf.head[0].iov_len;
416 	if (rtype == rpcrdma_areadch)
417 		pos = 0;
418 	seg = req->rl_segments;
419 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
420 				     rtype, seg);
421 	if (nsegs < 0)
422 		return nsegs;
423 
424 	do {
425 		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
426 		if (IS_ERR(seg))
427 			return PTR_ERR(seg);
428 
429 		if (encode_read_segment(xdr, mr, pos) < 0)
430 			return -EMSGSIZE;
431 
432 		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
433 		r_xprt->rx_stats.read_chunk_count++;
434 		nsegs -= mr->mr_nents;
435 	} while (nsegs);
436 
437 done:
438 	return encode_item_not_present(xdr);
439 }
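
/* Hypothetical example: if rpcrdma_convert_iovs() produced five
 * segments and the device's registration depth lets frwr_map() cover
 * three of them with the first MR, the loop above emits two read
 * segments sharing the same position P, and the terminating
 * discriminator from encode_item_not_present() yields
 * "1 - PHLOO - 1 - PHLOO - 0" on the wire.
 */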
440 
441 /* Register and XDR encode the Write list. Supports encoding a list
442  * containing one array of plain segments that belong to a single
443  * write chunk.
444  *
445  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
446  *
447  *  Write chunklist (a list of (one) counted array):
448  *   N elements:
449  *    1 - N - HLOO - HLOO - ... - HLOO - 0
450  *
451  * Returns zero on success, or a negative errno if a failure occurred.
452  * @xdr is advanced to the next position in the stream.
453  *
454  * Only a single Write chunk is currently supported.
455  */
456 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
457 				     struct rpcrdma_req *req,
458 				     struct rpc_rqst *rqst,
459 				     enum rpcrdma_chunktype wtype)
460 {
461 	struct xdr_stream *xdr = &req->rl_stream;
462 	struct rpcrdma_mr_seg *seg;
463 	struct rpcrdma_mr *mr;
464 	int nsegs, nchunks;
465 	__be32 *segcount;
466 
467 	if (wtype != rpcrdma_writech)
468 		goto done;
469 
470 	seg = req->rl_segments;
471 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
472 				     rqst->rq_rcv_buf.head[0].iov_len,
473 				     wtype, seg);
474 	if (nsegs < 0)
475 		return nsegs;
476 
477 	if (encode_item_present(xdr) < 0)
478 		return -EMSGSIZE;
479 	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
480 	if (unlikely(!segcount))
481 		return -EMSGSIZE;
482 	/* Actual value encoded below */
483 
484 	nchunks = 0;
485 	do {
486 		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
487 		if (IS_ERR(seg))
488 			return PTR_ERR(seg);
489 
490 		if (encode_rdma_segment(xdr, mr) < 0)
491 			return -EMSGSIZE;
492 
493 		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
494 		r_xprt->rx_stats.write_chunk_count++;
495 		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
496 		nchunks++;
497 		nsegs -= mr->mr_nents;
498 	} while (nsegs);
499 
500 	/* Update count of segments in this Write chunk */
501 	*segcount = cpu_to_be32(nchunks);
502 
503 done:
504 	return encode_item_not_present(xdr);
505 }
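
/* Note (added for clarity): the Write chunk's segment count is not
 * known until every MR has been registered, so the encoder reserves
 * the XDR word first and back-fills it through *segcount after the
 * loop. rpcrdma_encode_reply_chunk() below uses the same pattern.
 */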
506 
507 /* Register and XDR encode the Reply chunk. Supports encoding an array
508  * of plain segments that belong to a single write (reply) chunk.
509  *
510  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
511  *
512  *  Reply chunk (a counted array):
513  *   N elements:
514  *    1 - N - HLOO - HLOO - ... - HLOO
515  *
516  * Returns zero on success, or a negative errno if a failure occurred.
517  * @xdr is advanced to the next position in the stream.
518  */
519 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
520 				      struct rpcrdma_req *req,
521 				      struct rpc_rqst *rqst,
522 				      enum rpcrdma_chunktype wtype)
523 {
524 	struct xdr_stream *xdr = &req->rl_stream;
525 	struct rpcrdma_mr_seg *seg;
526 	struct rpcrdma_mr *mr;
527 	int nsegs, nchunks;
528 	__be32 *segcount;
529 
530 	if (wtype != rpcrdma_replych)
531 		return encode_item_not_present(xdr);
532 
533 	seg = req->rl_segments;
534 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
535 	if (nsegs < 0)
536 		return nsegs;
537 
538 	if (encode_item_present(xdr) < 0)
539 		return -EMSGSIZE;
540 	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
541 	if (unlikely(!segcount))
542 		return -EMSGSIZE;
543 	/* Actual value encoded below */
544 
545 	nchunks = 0;
546 	do {
547 		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
548 		if (IS_ERR(seg))
549 			return PTR_ERR(seg);
550 
551 		if (encode_rdma_segment(xdr, mr) < 0)
552 			return -EMSGSIZE;
553 
554 		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
555 		r_xprt->rx_stats.reply_chunk_count++;
556 		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
557 		nchunks++;
558 		nsegs -= mr->mr_nents;
559 	} while (nsegs);
560 
561 	/* Update count of segments in the Reply chunk */
562 	*segcount = cpu_to_be32(nchunks);
563 
564 	return 0;
565 }
566 
567 static void rpcrdma_sendctx_done(struct kref *kref)
568 {
569 	struct rpcrdma_req *req =
570 		container_of(kref, struct rpcrdma_req, rl_kref);
571 	struct rpcrdma_rep *rep = req->rl_reply;
572 
573 	rpcrdma_complete_rqst(rep);
574 	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
575 }
576 
577 /**
578  * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
579  * @sc: sendctx containing SGEs to unmap
580  *
581  */
582 void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
583 {
584 	struct ib_sge *sge;
585 
586 	if (!sc->sc_unmap_count)
587 		return;
588 
589 	/* The first two SGEs contain the transport header and
590 	 * the inline buffer. These are always left mapped so
591 	 * they can be cheaply re-used.
592 	 */
593 	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
594 	     ++sge, --sc->sc_unmap_count)
595 		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
596 				  DMA_TO_DEVICE);
597 
598 	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
599 }
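
/* Note (added for clarity): the kref_put here pairs with the kref_get
 * taken in rpcrdma_prepare_msg_sges() when sc_unmap_count is non-zero.
 * Whichever of the Send-unmap path and the Reply path drops the last
 * reference runs its "done" callback, and both callbacks end in
 * rpcrdma_complete_rqst().
 */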
600 
601 /* Prepare an SGE for the RPC-over-RDMA transport header.
602  */
603 static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
604 				    struct rpcrdma_req *req, u32 len)
605 {
606 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
607 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
608 	struct ib_sge *sge = sc->sc_sges;
609 
610 	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
611 		goto out_regbuf;
612 	sge->addr = rdmab_addr(rb);
613 	sge->length = len;
614 	sge->lkey = rdmab_lkey(rb);
615 
616 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
617 				      DMA_TO_DEVICE);
618 	sc->sc_wr.num_sge++;
619 	return true;
620 
621 out_regbuf:
622 	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
623 	return false;
624 }
625 
626 /* Prepare the Send SGEs. The head iovec, the tail iovec, and each
627  * entry in the page list each get their own SGE.
628  */
629 static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
630 				     struct rpcrdma_req *req,
631 				     struct xdr_buf *xdr,
632 				     enum rpcrdma_chunktype rtype)
633 {
634 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
635 	unsigned int sge_no, page_base, len, remaining;
636 	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
637 	struct ib_sge *sge = sc->sc_sges;
638 	struct page *page, **ppages;
639 
640 	/* The head iovec is straightforward, as it is already
641 	 * DMA-mapped. Sync the content that has changed.
642 	 */
643 	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
644 		goto out_regbuf;
645 	sc->sc_device = rdmab_device(rb);
646 	sge_no = 1;
647 	sge[sge_no].addr = rdmab_addr(rb);
648 	sge[sge_no].length = xdr->head[0].iov_len;
649 	sge[sge_no].lkey = rdmab_lkey(rb);
650 	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
651 				      sge[sge_no].length, DMA_TO_DEVICE);
652 
653 	/* If there is a Read chunk, the page list is being handled
654 	 * via explicit RDMA, and thus is skipped here. However, the
655 	 * tail iovec may include an XDR pad for the page list, as
656 	 * well as additional content, and may not reside in the
657 	 * same page as the head iovec.
658 	 */
659 	if (rtype == rpcrdma_readch) {
660 		len = xdr->tail[0].iov_len;
661 
662 		/* Do not include the tail if it is only an XDR pad */
663 		if (len < 4)
664 			goto out;
665 
666 		page = virt_to_page(xdr->tail[0].iov_base);
667 		page_base = offset_in_page(xdr->tail[0].iov_base);
668 
669 		/* If the content in the page list is an odd length,
670 		 * xdr_write_pages() has added a pad at the beginning
671 		 * of the tail iovec. Force the tail's non-pad content
672 		 * to land at the next XDR position in the Send message.
673 		 */
674 		page_base += len & 3;
675 		len -= len & 3;
676 		goto map_tail;
677 	}
678 
679 	/* If there is a page list present, temporarily DMA map
680 	 * and prepare an SGE for each page to be sent.
681 	 */
682 	if (xdr->page_len) {
683 		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
684 		page_base = offset_in_page(xdr->page_base);
685 		remaining = xdr->page_len;
686 		while (remaining) {
687 			sge_no++;
688 			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
689 				goto out_mapping_overflow;
690 
691 			len = min_t(u32, PAGE_SIZE - page_base, remaining);
692 			sge[sge_no].addr =
693 				ib_dma_map_page(rdmab_device(rb), *ppages,
694 						page_base, len, DMA_TO_DEVICE);
695 			if (ib_dma_mapping_error(rdmab_device(rb),
696 						 sge[sge_no].addr))
697 				goto out_mapping_err;
698 			sge[sge_no].length = len;
699 			sge[sge_no].lkey = rdmab_lkey(rb);
700 
701 			sc->sc_unmap_count++;
702 			ppages++;
703 			remaining -= len;
704 			page_base = 0;
705 		}
706 	}
707 
708 	/* The tail iovec is not always constructed in the same
709 	 * page where the head iovec resides (see, for example,
710 	 * gss_wrap_req_priv). To neatly accommodate that case,
711 	 * DMA map it separately.
712 	 */
713 	if (xdr->tail[0].iov_len) {
714 		page = virt_to_page(xdr->tail[0].iov_base);
715 		page_base = offset_in_page(xdr->tail[0].iov_base);
716 		len = xdr->tail[0].iov_len;
717 
718 map_tail:
719 		sge_no++;
720 		sge[sge_no].addr =
721 			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
722 					DMA_TO_DEVICE);
723 		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
724 			goto out_mapping_err;
725 		sge[sge_no].length = len;
726 		sge[sge_no].lkey = rdmab_lkey(rb);
727 		sc->sc_unmap_count++;
728 	}
729 
730 out:
731 	sc->sc_wr.num_sge += sge_no;
732 	if (sc->sc_unmap_count)
733 		kref_get(&req->rl_kref);
734 	return true;
735 
736 out_regbuf:
737 	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
738 	return false;
739 
740 out_mapping_overflow:
741 	rpcrdma_sendctx_unmap(sc);
742 	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
743 	return false;
744 
745 out_mapping_err:
746 	rpcrdma_sendctx_unmap(sc);
747 	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
748 	return false;
749 }
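
/* Note (added for clarity): sge[0] is the transport header prepared by
 * rpcrdma_prepare_hdr_sge(), so the counting above starts at sge_no 1
 * for the head iovec. The page-list loop stops at index
 * RPCRDMA_MAX_SEND_SGES - 2 so that the tail iovec can still claim the
 * final slot without exceeding the Send SGE budget.
 */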
750 
751 /**
752  * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
753  * @r_xprt: controlling transport
754  * @req: context of RPC Call being marshaled
755  * @hdrlen: size of transport header, in bytes
756  * @xdr: xdr_buf containing RPC Call
757  * @rtype: chunk type being encoded
758  *
759  * Returns 0 on success; otherwise a negative errno is returned.
760  */
761 int
762 rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
763 			  struct rpcrdma_req *req, u32 hdrlen,
764 			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
765 {
766 	int ret;
767 
768 	ret = -EAGAIN;
769 	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
770 	if (!req->rl_sendctx)
771 		goto err;
772 	req->rl_sendctx->sc_wr.num_sge = 0;
773 	req->rl_sendctx->sc_unmap_count = 0;
774 	req->rl_sendctx->sc_req = req;
775 	kref_init(&req->rl_kref);
776 
777 	ret = -EIO;
778 	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
779 		goto err;
780 	if (rtype != rpcrdma_areadch)
781 		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
782 			goto err;
783 	return 0;
784 
785 err:
786 	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
787 	return ret;
788 }
789 
790 /**
791  * rpcrdma_marshal_req - Marshal and send one RPC request
792  * @r_xprt: controlling transport
793  * @rqst: RPC request to be marshaled
794  *
795  * For the RPC in "rqst", this function:
796  *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
797  *  - Registers Read, Write, and Reply chunks
798  *  - Constructs the transport header
799  *  - Posts a Send WR to send the transport header and request
800  *
801  * Returns:
802  *	%0 if the RPC was sent successfully,
803  *	%-ENOTCONN if the connection was lost,
804  *	%-EAGAIN if the caller should call again with the same arguments,
805  *	%-ENOBUFS if the caller should call again after a delay,
806  *	%-EMSGSIZE if the transport header is too small,
807  *	%-EIO if a permanent problem occurred while marshaling.
808  */
809 int
810 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
811 {
812 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
813 	struct xdr_stream *xdr = &req->rl_stream;
814 	enum rpcrdma_chunktype rtype, wtype;
815 	bool ddp_allowed;
816 	__be32 *p;
817 	int ret;
818 
819 	if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
820 		ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
821 		if (ret)
822 			return ret;
823 	}
824 
825 	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
826 	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
827 			rqst);
828 
829 	/* Fixed header fields */
830 	ret = -EMSGSIZE;
831 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
832 	if (!p)
833 		goto out_err;
834 	*p++ = rqst->rq_xid;
835 	*p++ = rpcrdma_version;
836 	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
837 
838 	/* When the ULP employs a GSS flavor that guarantees integrity
839 	 * or privacy, direct data placement of individual data items
840 	 * is not allowed.
841 	 */
842 	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
843 						RPCAUTH_AUTH_DATATOUCH);
844 
845 	/*
846 	 * Chunks needed for results?
847 	 *
848 	 * o If the expected result is under the inline threshold, all ops
849 	 *   return as inline.
850 	 * o Large read ops return data as write chunk(s), header as
851 	 *   inline.
852 	 * o Large non-read ops return as a single reply chunk.
853 	 */
854 	if (rpcrdma_results_inline(r_xprt, rqst))
855 		wtype = rpcrdma_noch;
856 	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
857 		 rpcrdma_nonpayload_inline(r_xprt, rqst))
858 		wtype = rpcrdma_writech;
859 	else
860 		wtype = rpcrdma_replych;
861 
862 	/*
863 	 * Chunks needed for arguments?
864 	 *
865 	 * o If the total request is under the inline threshold, all ops
866 	 *   are sent as inline.
867 	 * o Large write ops transmit data as read chunk(s), header as
868 	 *   inline.
869 	 * o Large non-write ops are sent with the entire message as a
870 	 *   single read chunk (protocol 0-position special case).
871 	 *
872 	 * This assumes that the upper layer does not present a request
873 	 * that both has a data payload, and whose non-data arguments
874 	 * by themselves are larger than the inline threshold.
875 	 */
876 	if (rpcrdma_args_inline(r_xprt, rqst)) {
877 		*p++ = rdma_msg;
878 		rtype = rpcrdma_noch;
879 	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
880 		*p++ = rdma_msg;
881 		rtype = rpcrdma_readch;
882 	} else {
883 		r_xprt->rx_stats.nomsg_call_count++;
884 		*p++ = rdma_nomsg;
885 		rtype = rpcrdma_areadch;
886 	}
887 
888 	/* If this is a retransmit, discard previously registered
889 	 * chunks. Very likely the connection has been replaced,
890 	 * so these registrations are invalid and unusable.
891 	 */
892 	frwr_recycle(req);
893 
894 	/* This implementation supports the following combinations
895 	 * of chunk lists in one RPC-over-RDMA Call message:
896 	 *
897 	 *   - Read list
898 	 *   - Write list
899 	 *   - Reply chunk
900 	 *   - Read list + Reply chunk
901 	 *
902 	 * It might not yet support the following combinations:
903 	 *
904 	 *   - Read list + Write list
905 	 *
906 	 * It does not support the following combinations:
907 	 *
908 	 *   - Write list + Reply chunk
909 	 *   - Read list + Write list + Reply chunk
910 	 *
911 	 * This implementation supports only a single chunk in each
912 	 * Read or Write list. Thus for example the client cannot
913 	 * send a Call message with a Position Zero Read chunk and a
914 	 * regular Read chunk at the same time.
915 	 */
916 	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
917 	if (ret)
918 		goto out_err;
919 	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
920 	if (ret)
921 		goto out_err;
922 	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
923 	if (ret)
924 		goto out_err;
925 
926 	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
927 					&rqst->rq_snd_buf, rtype);
928 	if (ret)
929 		goto out_err;
930 
931 	trace_xprtrdma_marshal(req, rtype, wtype);
932 	return 0;
933 
934 out_err:
935 	trace_xprtrdma_marshal_failed(rqst, ret);
936 	r_xprt->rx_stats.failed_marshal_count++;
937 	frwr_reset(req);
938 	return ret;
939 }
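
/* Summary (added for clarity) of the Call shapes chosen above:
 *
 *   rdma_msg  + rpcrdma_noch:     entire Call sent inline
 *   rdma_msg  + rpcrdma_readch:   data payload moved via a Read chunk,
 *                                 the rest of the Call sent inline
 *   rdma_nomsg + rpcrdma_areadch: whole Call body in a position-zero
 *                                 Read chunk, nothing sent inline
 *
 * wtype independently selects no chunk, a Write list, or a Reply chunk
 * for the Reply direction.
 */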
940 
941 /**
942  * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
943  * @rqst: controlling RPC request
944  * @srcp: points to RPC message payload in receive buffer
945  * @copy_len: remaining length of receive buffer content
946  * @pad: Write chunk pad bytes needed (zero for pure inline)
947  *
948  * The upper layer has set the maximum number of bytes it can
949  * receive in each component of rq_rcv_buf. These values are set in
950  * the head.iov_len, page_len, tail.iov_len, and buflen fields.
951  *
952  * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
953  * many cases this function simply updates iov_base pointers in
954  * rq_rcv_buf to point directly to the received reply data, to
955  * avoid copying reply data.
956  *
957  * Returns the count of bytes which had to be memcopied.
958  */
959 static unsigned long
960 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
961 {
962 	unsigned long fixup_copy_count;
963 	int i, npages, curlen;
964 	char *destp;
965 	struct page **ppages;
966 	int page_base;
967 
968 	/* The head iovec is redirected to the RPC reply message
969 	 * in the receive buffer, to avoid a memcopy.
970 	 */
971 	rqst->rq_rcv_buf.head[0].iov_base = srcp;
972 	rqst->rq_private_buf.head[0].iov_base = srcp;
973 
974 	/* The contents of the receive buffer that follow
975 	 * head.iov_len bytes are copied into the page list.
976 	 */
977 	curlen = rqst->rq_rcv_buf.head[0].iov_len;
978 	if (curlen > copy_len)
979 		curlen = copy_len;
980 	trace_xprtrdma_fixup(rqst, copy_len, curlen);
981 	srcp += curlen;
982 	copy_len -= curlen;
983 
984 	ppages = rqst->rq_rcv_buf.pages +
985 		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
986 	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
987 	fixup_copy_count = 0;
988 	if (copy_len && rqst->rq_rcv_buf.page_len) {
989 		int pagelist_len;
990 
991 		pagelist_len = rqst->rq_rcv_buf.page_len;
992 		if (pagelist_len > copy_len)
993 			pagelist_len = copy_len;
994 		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
995 		for (i = 0; i < npages; i++) {
996 			curlen = PAGE_SIZE - page_base;
997 			if (curlen > pagelist_len)
998 				curlen = pagelist_len;
999 
1000 			trace_xprtrdma_fixup_pg(rqst, i, srcp,
1001 						copy_len, curlen);
1002 			destp = kmap_atomic(ppages[i]);
1003 			memcpy(destp + page_base, srcp, curlen);
1004 			flush_dcache_page(ppages[i]);
1005 			kunmap_atomic(destp);
1006 			srcp += curlen;
1007 			copy_len -= curlen;
1008 			fixup_copy_count += curlen;
1009 			pagelist_len -= curlen;
1010 			if (!pagelist_len)
1011 				break;
1012 			page_base = 0;
1013 		}
1014 
1015 		/* Implicit padding for the last segment in a Write
1016 		 * chunk is inserted inline at the front of the tail
1017 		 * iovec. The upper layer ignores the content of
1018 		 * the pad. Simply ensure inline content in the tail
1019 		 * that follows the Write chunk is properly aligned.
1020 		 */
1021 		if (pad)
1022 			srcp -= pad;
1023 	}
1024 
1025 	/* The tail iovec is redirected to the remaining data
1026 	 * in the receive buffer, to avoid a memcopy.
1027 	 */
1028 	if (copy_len || pad) {
1029 		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
1030 		rqst->rq_private_buf.tail[0].iov_base = srcp;
1031 	}
1032 
1033 	return fixup_copy_count;
1034 }
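
/* Note (added for clarity): only bytes landing in the page list are
 * actually memcopied (and counted in the return value, which feeds
 * rx_stats.fixup_copy_count). The head and tail iovecs are simply
 * redirected into the receive buffer, so inline replies that fit in
 * head and tail cost no data copy at all.
 */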
1035 
1036 /* By convention, backchannel calls arrive via rdma_msg type
1037  * messages, and never populate the chunk lists. This makes
1038  * the RPC/RDMA header small and fixed in size, so it is
1039  * straightforward to check the RPC header's direction field.
1040  */
1041 static bool
1042 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1043 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1044 {
1045 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1046 	struct xdr_stream *xdr = &rep->rr_stream;
1047 	__be32 *p;
1048 
1049 	if (rep->rr_proc != rdma_msg)
1050 		return false;
1051 
1052 	/* Peek at stream contents without advancing. */
1053 	p = xdr_inline_decode(xdr, 0);
1054 
1055 	/* Chunk lists */
1056 	if (*p++ != xdr_zero)
1057 		return false;
1058 	if (*p++ != xdr_zero)
1059 		return false;
1060 	if (*p++ != xdr_zero)
1061 		return false;
1062 
1063 	/* RPC header */
1064 	if (*p++ != rep->rr_xid)
1065 		return false;
1066 	if (*p != cpu_to_be32(RPC_CALL))
1067 		return false;
1068 
1069 	/* No bc service. */
1070 	if (xprt->bc_serv == NULL)
1071 		return false;
1072 
1073 	/* Now that we are sure this is a backchannel call,
1074 	 * advance to the RPC header.
1075 	 */
1076 	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
1077 	if (unlikely(!p))
1078 		goto out_short;
1079 
1080 	rpcrdma_bc_receive_call(r_xprt, rep);
1081 	return true;
1082 
1083 out_short:
1084 	pr_warn("RPC/RDMA short backward direction call\n");
1085 	return true;
1086 }
1087 #else	/* CONFIG_SUNRPC_BACKCHANNEL */
1088 {
1089 	return false;
1090 }
1091 #endif	/* CONFIG_SUNRPC_BACKCHANNEL */
1092 
1093 static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
1094 {
1095 	u32 handle;
1096 	u64 offset;
1097 	__be32 *p;
1098 
1099 	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
1100 	if (unlikely(!p))
1101 		return -EIO;
1102 
1103 	handle = be32_to_cpup(p++);
1104 	*length = be32_to_cpup(p++);
1105 	xdr_decode_hyper(p, &offset);
1106 
1107 	trace_xprtrdma_decode_seg(handle, *length, offset);
1108 	return 0;
1109 }
1110 
1111 static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
1112 {
1113 	u32 segcount, seglength;
1114 	__be32 *p;
1115 
1116 	p = xdr_inline_decode(xdr, sizeof(*p));
1117 	if (unlikely(!p))
1118 		return -EIO;
1119 
1120 	*length = 0;
1121 	segcount = be32_to_cpup(p);
1122 	while (segcount--) {
1123 		if (decode_rdma_segment(xdr, &seglength))
1124 			return -EIO;
1125 		*length += seglength;
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 /* In RPC-over-RDMA Version One replies, a Read list is never
1132  * expected. This decoder is a stub that returns an error if
1133  * a Read list is present.
1134  */
1135 static int decode_read_list(struct xdr_stream *xdr)
1136 {
1137 	__be32 *p;
1138 
1139 	p = xdr_inline_decode(xdr, sizeof(*p));
1140 	if (unlikely(!p))
1141 		return -EIO;
1142 	if (unlikely(*p != xdr_zero))
1143 		return -EIO;
1144 	return 0;
1145 }
1146 
1147 /* Supports only one Write chunk in the Write list
1148  */
1149 static int decode_write_list(struct xdr_stream *xdr, u32 *length)
1150 {
1151 	u32 chunklen;
1152 	bool first;
1153 	__be32 *p;
1154 
1155 	*length = 0;
1156 	first = true;
1157 	do {
1158 		p = xdr_inline_decode(xdr, sizeof(*p));
1159 		if (unlikely(!p))
1160 			return -EIO;
1161 		if (*p == xdr_zero)
1162 			break;
1163 		if (!first)
1164 			return -EIO;
1165 
1166 		if (decode_write_chunk(xdr, &chunklen))
1167 			return -EIO;
1168 		*length += chunklen;
1169 		first = false;
1170 	} while (true);
1171 	return 0;
1172 }
1173 
1174 static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
1175 {
1176 	__be32 *p;
1177 
1178 	p = xdr_inline_decode(xdr, sizeof(*p));
1179 	if (unlikely(!p))
1180 		return -EIO;
1181 
1182 	*length = 0;
1183 	if (*p != xdr_zero)
1184 		if (decode_write_chunk(xdr, length))
1185 			return -EIO;
1186 	return 0;
1187 }
1188 
1189 static int
1190 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1191 		   struct rpc_rqst *rqst)
1192 {
1193 	struct xdr_stream *xdr = &rep->rr_stream;
1194 	u32 writelist, replychunk, rpclen;
1195 	char *base;
1196 
1197 	/* Decode the chunk lists */
1198 	if (decode_read_list(xdr))
1199 		return -EIO;
1200 	if (decode_write_list(xdr, &writelist))
1201 		return -EIO;
1202 	if (decode_reply_chunk(xdr, &replychunk))
1203 		return -EIO;
1204 
1205 	/* RDMA_MSG sanity checks */
1206 	if (unlikely(replychunk))
1207 		return -EIO;
1208 
1209 	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
1210 	base = (char *)xdr_inline_decode(xdr, 0);
1211 	rpclen = xdr_stream_remaining(xdr);
1212 	r_xprt->rx_stats.fixup_copy_count +=
1213 		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
1214 
1215 	r_xprt->rx_stats.total_rdma_reply += writelist;
1216 	return rpclen + xdr_align_size(writelist);
1217 }
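
/* Note (added for clarity): the value returned above is the inline
 * portion plus the XDR-aligned count of bytes the server placed via
 * the Write list; rpcrdma_complete_rqst() hands it to
 * xprt_complete_rqst() as the reply length. The low two bits of
 * "writelist" double as the Write chunk pad argument to
 * rpcrdma_inline_fixup().
 */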
1218 
1219 static noinline int
1220 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1221 {
1222 	struct xdr_stream *xdr = &rep->rr_stream;
1223 	u32 writelist, replychunk;
1224 
1225 	/* Decode the chunk lists */
1226 	if (decode_read_list(xdr))
1227 		return -EIO;
1228 	if (decode_write_list(xdr, &writelist))
1229 		return -EIO;
1230 	if (decode_reply_chunk(xdr, &replychunk))
1231 		return -EIO;
1232 
1233 	/* RDMA_NOMSG sanity checks */
1234 	if (unlikely(writelist))
1235 		return -EIO;
1236 	if (unlikely(!replychunk))
1237 		return -EIO;
1238 
1239 	/* Reply chunk buffer already is the reply vector */
1240 	r_xprt->rx_stats.total_rdma_reply += replychunk;
1241 	return replychunk;
1242 }
1243 
1244 static noinline int
1245 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1246 		     struct rpc_rqst *rqst)
1247 {
1248 	struct xdr_stream *xdr = &rep->rr_stream;
1249 	__be32 *p;
1250 
1251 	p = xdr_inline_decode(xdr, sizeof(*p));
1252 	if (unlikely(!p))
1253 		return -EIO;
1254 
1255 	switch (*p) {
1256 	case err_vers:
1257 		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1258 		if (!p)
1259 			break;
1260 		dprintk("RPC:       %s: server reports "
1261 			"version error (%u-%u), xid %08x\n", __func__,
1262 			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
1263 			be32_to_cpu(rep->rr_xid));
1264 		break;
1265 	case err_chunk:
1266 		dprintk("RPC:       %s: server reports "
1267 			"header decoding error, xid %08x\n", __func__,
1268 			be32_to_cpu(rep->rr_xid));
1269 		break;
1270 	default:
1271 		dprintk("RPC:       %s: server reports "
1272 			"unrecognized error %d, xid %08x\n", __func__,
1273 			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
1274 	}
1275 
1276 	return -EIO;
1277 }
1278 
1279 /* Perform XID lookup, reconstruction of the RPC reply, and
1280  * RPC completion while holding the transport lock to ensure
1281  * the rep, rqst, and rq_task pointers remain stable.
1282  */
1283 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
1284 {
1285 	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1286 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1287 	struct rpc_rqst *rqst = rep->rr_rqst;
1288 	int status;
1289 
1290 	switch (rep->rr_proc) {
1291 	case rdma_msg:
1292 		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1293 		break;
1294 	case rdma_nomsg:
1295 		status = rpcrdma_decode_nomsg(r_xprt, rep);
1296 		break;
1297 	case rdma_error:
1298 		status = rpcrdma_decode_error(r_xprt, rep, rqst);
1299 		break;
1300 	default:
1301 		status = -EIO;
1302 	}
1303 	if (status < 0)
1304 		goto out_badheader;
1305 
1306 out:
1307 	spin_lock(&xprt->queue_lock);
1308 	xprt_complete_rqst(rqst->rq_task, status);
1309 	xprt_unpin_rqst(rqst);
1310 	spin_unlock(&xprt->queue_lock);
1311 	return;
1312 
1313 out_badheader:
1314 	trace_xprtrdma_reply_hdr(rep);
1315 	r_xprt->rx_stats.bad_reply_count++;
1316 	rqst->rq_task->tk_status = status;
1317 	status = 0;
1318 	goto out;
1319 }
1320 
1321 static void rpcrdma_reply_done(struct kref *kref)
1322 {
1323 	struct rpcrdma_req *req =
1324 		container_of(kref, struct rpcrdma_req, rl_kref);
1325 
1326 	rpcrdma_complete_rqst(req->rl_reply);
1327 }
1328 
1329 /**
1330  * rpcrdma_reply_handler - Process received RPC/RDMA messages
1331  * @rep: Incoming rpcrdma_rep object to process
1332  *
1333  * Errors must result in the RPC task either being awakened, or
1334  * allowed to timeout, to discover the errors at that time.
1335  */
1336 void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
1337 {
1338 	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1339 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1340 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1341 	struct rpcrdma_req *req;
1342 	struct rpc_rqst *rqst;
1343 	u32 credits;
1344 	__be32 *p;
1345 
1346 	/* Any data means we had a useful conversation, so
1347 	 * we don't need to delay the next reconnect.
1348 	 */
1349 	if (xprt->reestablish_timeout)
1350 		xprt->reestablish_timeout = 0;
1351 
1352 	/* Fixed transport header fields */
1353 	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
1354 			rep->rr_hdrbuf.head[0].iov_base, NULL);
1355 	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
1356 	if (unlikely(!p))
1357 		goto out_shortreply;
1358 	rep->rr_xid = *p++;
1359 	rep->rr_vers = *p++;
1360 	credits = be32_to_cpu(*p++);
1361 	rep->rr_proc = *p++;
1362 
1363 	if (rep->rr_vers != rpcrdma_version)
1364 		goto out_badversion;
1365 
1366 	if (rpcrdma_is_bcall(r_xprt, rep))
1367 		return;
1368 
1369 	/* Match incoming rpcrdma_rep to an rpcrdma_req to
1370 	 * get context for handling any incoming chunks.
1371 	 */
1372 	spin_lock(&xprt->queue_lock);
1373 	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
1374 	if (!rqst)
1375 		goto out_norqst;
1376 	xprt_pin_rqst(rqst);
1377 	spin_unlock(&xprt->queue_lock);
1378 
1379 	if (credits == 0)
1380 		credits = 1;	/* don't deadlock */
1381 	else if (credits > buf->rb_max_requests)
1382 		credits = buf->rb_max_requests;
1383 	if (buf->rb_credits != credits) {
1384 		spin_lock(&xprt->transport_lock);
1385 		buf->rb_credits = credits;
1386 		xprt->cwnd = credits << RPC_CWNDSHIFT;
1387 		spin_unlock(&xprt->transport_lock);
1388 	}
1389 	rpcrdma_post_recvs(r_xprt, false);
1390 
1391 	req = rpcr_to_rdmar(rqst);
1392 	if (req->rl_reply) {
1393 		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
1394 		rpcrdma_recv_buffer_put(req->rl_reply);
1395 	}
1396 	req->rl_reply = rep;
1397 	rep->rr_rqst = rqst;
1398 
1399 	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
1400 
1401 	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
1402 		frwr_reminv(rep, &req->rl_registered);
1403 	if (!list_empty(&req->rl_registered))
1404 		frwr_unmap_async(r_xprt, req);
1405 		/* LocalInv completion will complete the RPC */
1406 	else
1407 		kref_put(&req->rl_kref, rpcrdma_reply_done);
1408 	return;
1409 
1410 out_badversion:
1411 	trace_xprtrdma_reply_vers(rep);
1412 	goto out;
1413 
1414 out_norqst:
1415 	spin_unlock(&xprt->queue_lock);
1416 	trace_xprtrdma_reply_rqst(rep);
1417 	goto out;
1418 
1419 out_shortreply:
1420 	trace_xprtrdma_reply_short(rep);
1421 
1422 out:
1423 	rpcrdma_recv_buffer_put(rep);
1424 }
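
/* Note (added for clarity) on completion ordering: rl_kref is
 * initialized in rpcrdma_prepare_send_sges() and may also be held by
 * the Send context, so rpcrdma_complete_rqst() runs only when the last
 * holder drops its reference: via rpcrdma_reply_done() here, or via
 * rpcrdma_sendctx_done() once the Send SGEs are unmapped. Replies with
 * registered MRs instead go through frwr_unmap_async(), whose LocalInv
 * completion finishes the RPC.
 */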
1425