// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
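
/*
 * Illustrative sketch only (editorial addition, not compiled): the life
 * cycle above, expressed with the entry points defined in this file.
 * The real caller is the task state machine in net/sunrpc/clnt.c, and
 * error handling, requeueing and receive-queue setup are omitted.
 *
 *	xprt_reserve(task);			// get a slot, or sleep on the backlog
 *	if (xprt_prepare_transmit(task)) {	// serialize access to the transport
 *		xprt_request_enqueue_transmit(task);
 *		xprt_transmit(task);		// drain the transmit queue
 *		xprt_end_transmit(task);
 *	}
 *	xprt_request_wait_receive(task);	// sleep until data_ready matches our XID
 *	xprt_release(task);			// give the slot back
 */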

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
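
/*
 * Example (a minimal sketch with hypothetical names): a transport module
 * normally registers a statically defined xprt_class from its module_init
 * hook and unregisters it again on exit. The ident value, setup callback
 * and netid strings below are placeholders, not a real transport.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,	// a real class needs its own ident
 *		.setup	= example_setup,
 *		.netid	= { "example", "" },
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */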

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_load_transport - load a transport implementation
 * @netid: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *netid)
{
	const struct xprt_class *t;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	xprt_class_release(t);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
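
/*
 * Worked example of the update above, assuming RPC_CWNDSCALE is
 * (1 << 8) == 256 as defined in linux/sunrpc/xprt.h: with two requests
 * in flight, cwnd == 512, and a successful reply adds
 * (256 * 256 + 256) / 512 == 128, i.e. half a request's worth of window,
 * which is the classic additive increase of 1/cwnd. A timeout instead
 * halves cwnd, never dropping below RPC_CWNDSCALE (one request).
 */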

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
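
/*
 * Illustrative numbers (not the defaults of any particular transport):
 * with rq_timeout = 5 * HZ and to_retries = 3, the linear case with
 * to_increment = 5 * HZ yields a major timeout of 5s + 3 * 5s = 20s,
 * while the exponential case yields 5s << 3 = 40s. Either result is
 * then clamped to to_maxval.
 */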

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
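
/*
 * Example of the adjustment above, with illustrative values
 * to_initval = 5 * HZ, to_increment = 5 * HZ, to_maxval = 30 * HZ and
 * to_exponential == 0: successive minor timeouts wait 5s, 10s, 15s, ...
 * capped at 30s, until rq_majortimeo expires; at that point the timeout
 * resets to 5s and -ETIMEDOUT is returned so the caller can observe the
 * major timeout.
 */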

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
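
/*
 * Sketch of how these two helpers are meant to cooperate, modelled on
 * the socket transports (XS_TCP_INIT_REEST_TO and the delayed worker
 * are stand-ins for whatever the transport actually uses):
 *
 *	unsigned long delay = xprt_reconnect_delay(xprt);
 *
 *	xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
 *	queue_delayed_work(xprtiod_workqueue, &transport->connect_worker,
 *			   delay);
 */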

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);
/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
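
/*
 * The lookup/pin/unpin helpers above combine in a transport's receive
 * path roughly as follows (sketch; copy_reply_data() is a hypothetical
 * transport-specific copy routine, and error handling is omitted):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req)
 *		goto out_unlock;
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *
 *	copied = copy_reply_data(req);
 *
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 * out_unlock:
 *	spin_unlock(&xprt->queue_lock);
 */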

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}
/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	for (;;) {
		next = list_first_entry_or_null(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		if (!next)
			break;
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status < 0) {
			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				task->tk_status = status;
			break;
		}
		/* Was @task transmitted, and has it received a reply? */
		if (xprt_request_data_received(task) &&
		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			break;
		cond_resched_lock(&xprt->queue_lock);
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
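
/*
 * Example (sketch modelled on the socket transports; the type and
 * variable names are placeholders): a transport's setup routine embeds
 * struct rpc_xprt in its own private structure and sizes the slot table
 * when calling xprt_alloc.
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;
 *		// transport-private state follows
 *	};
 *
 *	xprt = xprt_alloc(net, sizeof(struct example_xprt),
 *			  slot_table_entries, max_slot_table_entries);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */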
1723
xprt_free(struct rpc_xprt * xprt)1724 void xprt_free(struct rpc_xprt *xprt)
1725 {
1726 put_net(xprt->xprt_net);
1727 xprt_free_all_slots(xprt);
1728 kfree_rcu(xprt, rcu);
1729 }
1730 EXPORT_SYMBOL_GPL(xprt_free);
1731
1732 static void
xprt_init_connect_cookie(struct rpc_rqst * req,struct rpc_xprt * xprt)1733 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1734 {
1735 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1736 }
1737
1738 static __be32
xprt_alloc_xid(struct rpc_xprt * xprt)1739 xprt_alloc_xid(struct rpc_xprt *xprt)
1740 {
1741 __be32 xid;
1742
1743 spin_lock(&xprt->reserve_lock);
1744 xid = (__force __be32)xprt->xid++;
1745 spin_unlock(&xprt->reserve_lock);
1746 return xid;
1747 }

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
		req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}
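
/*
 * Caller-side sketch (hedged): the RPC client state machine reserves a
 * slot before encoding the call. "example_reserve_step" and its
 * next-state function are hypothetical stand-ins for the logic in
 * net/sunrpc/clnt.c; on -EAGAIN the task has already been queued on
 * the backlog and this state re-runs once a slot is freed.
 */
#if 0
static void example_reserve_step(struct rpc_task *task)
{
	task->tk_action = example_reserve_result;	/* hypothetical next state */
	xprt_reserve(task);
}
#endif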

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve is that this
 * function ignores the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif
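
/*
 * Worked example (illustrative numbers only): a backchannel request
 * with a 64-byte head iovec, 4096 bytes of page data and an empty tail
 * ends up with xbufp->len = 64 + 4096 + 0 = 4160 bytes of pre-encoded
 * XDR data.
 */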

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
		xprt->max_reqs);
out:
	return xprt;
}
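
/*
 * Usage sketch (hedged): a minimal caller fills in struct xprt_create
 * and lets the registered class's ->setup() do the work. The TCP
 * transport ident and the server name below are illustrative
 * assumptions.
 */
#if 0
static struct rpc_xprt *example_create_tcp(struct net *net,
					   struct sockaddr *sap, size_t salen)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= "example.server",	/* hypothetical */
	};

	return xprt_create_transport(&args);	/* ERR_PTR() on failure */
}
#endif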

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared. We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc. from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
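
/*
 * Reference-counting sketch: every successful xprt_get() must be
 * paired with an xprt_put(); dropping the last reference invokes
 * xprt_destroy_kref() and tears the transport down. "example_use" is
 * a hypothetical caller.
 */
#if 0
static void example_use(struct rpc_xprt *xprt)
{
	struct rpc_xprt *held = xprt_get(xprt);

	if (!held)
		return;		/* transport is already being destroyed */
	/* ... safely use the transport here ... */
	xprt_put(held);
}
#endif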