// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled when the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst.
 */
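
/* Illustrative sketch only: how a generic svc_recv-style caller would
 * observe the two-call pattern described above. The control flow and
 * variable names here are hypothetical; only svc_rdma_recvfrom is real.
 *
 *	len = svc_rdma_recvfrom(rqstp);
 *	if (len > 0)
 *		;	// the complete RPC Call is in rqstp->rq_arg
 *	else if (len == 0)
 *		;	// either no Call was ready, or RDMA Reads were
 *			// posted; a later "Data Ready" event triggers
 *			// another call, possibly with a different svc_rqst
 */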

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}
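
/* Illustrative sketch only: the typical lifecycle of a recv_ctxt,
 * paired get/put around one I/O. Everything here except the two
 * functions above is hypothetical pseudo-usage, not code from this
 * file.
 *
 *	ctxt = svc_rdma_recv_ctxt_get(rdma);
 *	if (!ctxt)
 *		return;		// free list empty and allocation failed
 *	// ... post ctxt->rc_recv_wr, wait for the completion ...
 *	svc_rdma_recv_ctxt_put(rdma, ctxt);	// recycled via sc_recv_ctxts
 */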

/**
 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
 * @xprt: the transport which owned the context
 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
{
	struct svc_rdma_recv_ctxt *ctxt = vctxt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted, bool temp)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return false;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(ctxt);
		ctxt->rc_temp = temp;
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return false;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_free;
	return true;

err_free:
	trace_svcrdma_rq_post_err(rdma, ret);
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs. */
	return false;
}
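
/* Illustrative sketch only: the loop above builds the Recv WR chain in
 * LIFO order by pointing each new WR's ->next at the previous head, so
 * a single ib_post_recv() posts the whole batch. A hypothetical run
 * with three ctxts A, B, C obtained in that order yields:
 *
 *	recv_chain -> C.rc_recv_wr -> B.rc_recv_wr -> A.rc_recv_wr -> NULL
 *
 * On failure, ib_post_recv() sets bad_wr to the first WR that was not
 * posted; the err_free loop walks from there, releasing those ctxts.
 */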

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* If receive posting fails, the connection is about to be
	 * lost anyway. The server will not be able to send a reply
	 * for this RPC, and the client will retransmit this RPC
	 * anyway when it reconnects.
	 *
	 * Therefore we drop the Receive, even if status was SUCCESS
	 * to reduce the likelihood of replayed requests once the
	 * client reconnects.
	 */
	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
			goto flushed;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to
 *	    point to the first byte past the Read list. rc_read_pcl
 *	    and rc_call_pcl cl_count fields are set to the number of
 *	    Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *	    unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}
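
/* Illustrative sketch only: the XDR wire form of the Read list walked
 * above, per RFC 8166. Each entry is a one-word "item present"
 * discriminator followed by a Read segment. Position zero marks a
 * Position Zero Read chunk (the whole Call message is pulled by RDMA
 * Read); a non-zero, XDR-aligned position marks a data payload chunk.
 *
 *	1		// discriminator: an item follows
 *	position	// u32, byte offset into the RPC message
 *	handle		// u32, registered memory R_key
 *	length		// u32, length of the segment in bytes
 *	offset		// u64, offset of the segment in remote memory
 *	...		// further entries, then a final 0 discriminator
 */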

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* A bogus segcount causes this buffer overflow check to fail. */
	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}
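
/* Illustrative sketch only: the XDR wire form of the Write chunk that
 * xdr_check_write_chunk() bounds-checks, per RFC 8166. A Write chunk
 * is a counted array of plain segments (no position field):
 *
 *	segcount	// u32, number of segments in this chunk
 *	handle		// u32  \
 *	length		// u32   } repeated segcount times
 *	offset		// u64  /
 *
 * The decode above only verifies that segcount segments fit inside
 * the Receive buffer; the segments themselves are parsed later by
 * pcl_alloc_write().
 */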

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list, and
 *	    the number of Write chunks is in rc_write_pcl.cl_count.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Reply chunk.
 *  %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}
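
/* Illustrative sketch only: how a send path might consume rc_inv_rkey.
 * This is a hypothetical fragment, not the actual svc_rdma send code;
 * it assumes a struct ib_send_wr being prepared for the RPC Reply.
 *
 *	if (ctxt->rc_inv_rkey) {
 *		send_wr->opcode = IB_WR_SEND_WITH_INV;
 *		send_wr->ex.invalidate_rkey = ctxt->rc_inv_rkey;
 *	} else {
 *		send_wr->opcode = IB_WR_SEND;
 *	}
 */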

/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;			/* advance past rdma_xid */
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;			/* advance past rdma_vers and rdma_credit */
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}
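
/* Illustrative sketch only: the fixed RPC-over-RDMA v1 header that
 * svc_rdma_xdr_decode_req() walks, per RFC 8166. Four XDR words
 * (rpcrdma_fixed_maxsz) precede the three chunk lists:
 *
 *	rdma_xid	// u32, matches the XID of the RPC Call
 *	rdma_vers	// u32, always 1 for this protocol version
 *	rdma_credit	// u32, requested credit value
 *	rdma_msgtype	// rdma_msg, rdma_nomsg, rdma_done, rdma_error
 *	// Read list, Write list, and Reply chunk follow; each may
 *	// be a single "not present" word (zero)
 */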

static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* RPC call direction: with empty chunk lists, the transport
	 * header occupies seven XDR words, so the RPC message starts
	 * at p + 7; its second word (p + 8) is the direction field.
	 */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	/* Prevent svc_xprt_release() from releasing pages in rq_pages
	 * when returning 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	rqstp->rq_xprt_ctxt = NULL;

	ctxt = NULL;
	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (ctxt)
		list_del(&ctxt->rc_list);
	else
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	/* Unblock the transport for the next receive */
	svc_xprt_received(xprt);
	if (!ctxt)
		return 0;

	percpu_counter_inc(&svcrdma_stat_recv);
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl)) {
		ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
		if (ret < 0)
			goto out_readfail;
	}

	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	svc_xprt_deferred_close(xprt);
	return -ENOTCONN;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}
864