// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
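
/* A condensed sketch of the two-call flow described above. This is
 * illustrative pseudocode only, not code in this file; it assumes
 * nothing beyond what the Operation comment states:
 *
 *	len = svc_rdma_recvfrom(rqstp);		// first call
 *	// len > 0: short message; rq_arg holds the whole RPC Call
 *	// len == 0: Read chunks; RDMA Reads are posted, and the
 *	//	recv_ctxt waits until the Reads complete, after
 *	//	which svc_recv invokes:
 *	len = svc_rdma_recvfrom(other_rqstp);	// second call: assembly
 */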

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_recv_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma->sc_recv_lock);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_recv_lock);

	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp) {
		spin_lock(&rdma->sc_recv_lock);
		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
		spin_unlock(&rdma->sc_recv_lock);
	} else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}
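
/* Illustrative sketch of the recv_ctxt lifecycle implemented by the
 * helpers above (not code in this file; it restates how the callers
 * below use the get/put pair):
 *
 *	ctxt = svc_rdma_recv_ctxt_get(rdma);	// free list, else alloc
 *	if (!ctxt)
 *		return -ENOMEM;
 *	ret = __svc_rdma_post_recv(rdma, ctxt);	// hand buffer to the HCA
 *	...					// Receive completes
 *	svc_rdma_recv_ctxt_put(rdma, ctxt);	// back to sc_recv_ctxts,
 *						// or destroyed if rc_temp
 */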

/**
 * svc_rdma_release_rqst - Release transport-specific per-rqst resources
 * @rqstp: svc_rqst being released
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	rqstp->rq_xprt_ctxt = NULL;
	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return 0;
	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret) {
			pr_err("svcrdma: failure posting recv buffers: %d\n",
			       ret);
			return false;
		}
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	spin_unlock(&rdma->sc_rq_dto_lock);
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}
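
/* For reference, each Read list entry that the loop above steps over
 * has this on-the-wire XDR shape. The struct and its name are purely
 * illustrative; the code decodes the stream word-by-word and defines
 * no such type:
 *
 *	struct read_list_entry {	// hypothetical
 *		__be32 discrim;		// non-zero: another entry follows
 *		__be32 position;	// offset into the RPC message;
 *					//	must match in every entry
 *		__be32 handle;		// R_key for the client's region
 *		__be32 length;		// capped at MAX_BYTES_SPECIAL_SEG
 *		__be64 offset;		// remote address (two XDR words)
 *	};
 *
 * A single xdr_zero word in place of discrim terminates the list.
 */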

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}
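
/* Descriptive note: a Write chunk, as parsed above, is a segment
 * count followed by that many (handle, length, offset) triples --
 * the same segment shape a Read segment uses, minus the position
 * field.
 */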

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there are enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}
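
/* For reference, the fixed portion of the transport header decoded
 * above, by word index (an illustrative layout; the code indexes
 * rdma_argp directly):
 *
 *	rdma_argp[0]	XID, copied from the RPC Call header
 *	rdma_argp[1]	version; must be rpcrdma_version
 *	rdma_argp[2]	requested credit limit
 *	rdma_argp[3]	procedure: rdma_msg, rdma_nomsg, etc.
 *	rdma_argp[4..]	Read list, Write list, and Reply chunk, in
 *			that order, each checked by a helper above
 */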

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
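
/* An example header that satisfies the checks above, word by word
 * (an invented illustration, not captured traffic):
 *
 *	[0] XID, equal to word 7 below
 *	[1] rpcrdma_version
 *	[2] credits
 *	[3] rdma_msg
 *	[4] xdr_zero - empty Read list
 *	[5] xdr_zero - empty Write list
 *	[6] xdr_zero - no Reply chunk
 *	[7] RPC XID, first word of the RPC header
 *	[8] direction: anything but RPC_CALL, i.e. a backchannel Reply
 */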

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}
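
/* Hedged sketch of the caller's side of the contract documented
 * above, simplified from the generic svc_recv() path (this is not
 * the literal upstream loop):
 *
 *	len = xprt->xpt_ops->xpo_recvfrom(rqstp); // svc_rdma_recvfrom
 *	if (len > 0)
 *		svc_process(rqstp);	// complete RPC Call: dispatch it
 *	else
 *		release rqstp;		// 0 or error: Reads posted, queue
 *					// empty, or connection torn down
 */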