// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
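
/*
 * Illustrative sketch only (assumes a bound, connected transport): the
 * RPC client state machine in clnt.c steps a call through the primitives
 * in this file roughly as follows; xprt_reserve() and xprt_release() are
 * defined later in the file:
 *
 *	xprt_reserve(task);		   allocate a slot, or sleep on the backlog
 *	xprt_prepare_transmit(task);	   take the transport write lock
 *	xprt_transmit(task);		   drain the transmit queue
 *	xprt_request_wait_receive(task);   sleep until the reply arrives
 *	xprt_release(task);		   free the slot, wake backlog waiters
 */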

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "fail.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	 xprt_destroy(struct rpc_xprt *xprt);
static void	 xprt_request_init(struct rpc_task *task);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

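/*
 * Deadline for the next timeout event on @req: the sooner of the
 * per-retransmit (minor) timeout and the request's final (major)
 * timeout, expressed in jiffies.
 */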
static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
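
/*
 * Usage sketch (illustrative, modelled on how the socket transport in
 * xprtsock.c registers itself): a transport module calls this from its
 * module_init hook with a statically-defined class, e.g.
 *
 *	static struct xprt_class my_transport = {
 *		.name	= "tcp",
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		...
 *	};
 *	err = xprt_register_transport(&my_transport);
 *
 * and unregisters it again from its module_exit hook.
 */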

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);

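/*
 * Drop the transport write lock. If a close was requested while we held
 * the lock, keep XPRT_LOCKED set and hand the transport to the autoclose
 * worker instead, so the close runs under the lock we already own.
 */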
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case. Each request
 * in flight holds RPC_CWNDSCALE units of xprt->cong, and the transport
 * counts as congested once xprt->cong reaches xprt->cwnd.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 *
 * Since cwnd is kept in RPC_CWNDSCALE units, the additive step of
 * RPC_CWNDSCALE * RPC_CWNDSCALE / cwnd below grows the window by
 * roughly one full request slot per window's worth of replies.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

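/*
 * Worst-case interval this request may spend retrying before it finally
 * (major) times out: the current timeout grown by to_retries rounds of
 * either exponential doubling or linear to_increment steps, clamped to
 * to_maxval.
 */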
static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

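/*
 * A request must be retransmitted after a disconnect if the connection
 * was re-established since the request was last sent (its connect cookie
 * no longer matches the transport's), or if the transport is still down.
 */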
static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
	if (!fail_sunrpc.ignore_client_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		xprt->ops->inject_disconnect(xprt);
}
#else
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
}
#endif

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
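
/*
 * Requests waiting for a reply are kept in an rb-tree keyed by XID, so
 * the data_ready path described in the header comment can match an
 * incoming reply to its request in O(log n) time.
 */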
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

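/*
 * A request is added to the receive queue only if it is not already
 * queued (RPC_TASK_NEED_RECV) and no reply data has arrived for it yet.
 */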
static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters and put the task to sleep on the
 * pending queue. Used by transports that don't adjust the
 * retransmit timeout based on round-trip time estimation.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
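		/* Promote the next request in this rq_xmit2 group, if any,
		 * onto the main transmit queue in our place. */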
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	for (;;) {
		next = list_first_entry_or_null(&xprt->xmit_queue,
						struct rpc_rqst, rq_xmit);
		if (!next)
			break;
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status < 0) {
			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				task->tk_status = status;
			break;
		}
		/* Was @task transmitted, and has it received a reply? */
		if (xprt_request_data_received(task) &&
		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			break;
		cond_resched_lock(&xprt->queue_lock);
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

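/*
 * Hand a freed slot directly to the first task sleeping on the backlog:
 * the rqst is zeroed to mark it unused and attached to the task before
 * that task is woken, so the slot never passes back through the free
 * list.
 */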
static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

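/*
 * Grow the slot table by one entry, up to xprt->max_reqs. The
 * reserve_lock must be dropped across the GFP_KERNEL allocation, which
 * may sleep; workqueue callers add __GFP_NORETRY | __GFP_NOWARN so that
 * rpciod does not block in memory reclaim.
 */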
xprt_dynamic_alloc_slot(struct rpc_xprt * xprt)1671 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1672 {
1673 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1674 	gfp_t gfp_mask = GFP_KERNEL;
1675 
1676 	if (xprt->num_reqs >= xprt->max_reqs)
1677 		goto out;
1678 	++xprt->num_reqs;
1679 	spin_unlock(&xprt->reserve_lock);
1680 	if (current->flags & PF_WQ_WORKER)
1681 		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
1682 	req = kzalloc(sizeof(*req), gfp_mask);
1683 	spin_lock(&xprt->reserve_lock);
1684 	if (req != NULL)
1685 		goto out;
1686 	--xprt->num_reqs;
1687 	req = ERR_PTR(-ENOMEM);
1688 out:
1689 	return req;
1690 }
1691 
xprt_dynamic_free_slot(struct rpc_xprt * xprt,struct rpc_rqst * req)1692 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1693 {
1694 	if (xprt->num_reqs > xprt->min_reqs) {
1695 		--xprt->num_reqs;
1696 		kfree(req);
1697 		return true;
1698 	}
1699 	return false;
1700 }
1701 
xprt_alloc_slot(struct rpc_xprt * xprt,struct rpc_task * task)1702 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1703 {
1704 	struct rpc_rqst *req;
1705 
1706 	spin_lock(&xprt->reserve_lock);
1707 	if (!list_empty(&xprt->free)) {
1708 		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1709 		list_del(&req->rq_list);
1710 		goto out_init_req;
1711 	}
1712 	req = xprt_dynamic_alloc_slot(xprt);
1713 	if (!IS_ERR(req))
1714 		goto out_init_req;
1715 	switch (PTR_ERR(req)) {
1716 	case -ENOMEM:
1717 		dprintk("RPC:       dynamic allocation of request slot "
1718 				"failed! Retrying\n");
1719 		task->tk_status = -ENOMEM;
1720 		break;
1721 	case -EAGAIN:
1722 		xprt_add_backlog(xprt, task);
1723 		dprintk("RPC:       waiting for request slot\n");
1724 		fallthrough;
1725 	default:
1726 		task->tk_status = -EAGAIN;
1727 	}
1728 	spin_unlock(&xprt->reserve_lock);
1729 	return;
1730 out_init_req:
1731 	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1732 				     xprt->num_reqs);
1733 	spin_unlock(&xprt->reserve_lock);
1734 
1735 	task->tk_status = 0;
1736 	task->tk_rqstp = req;
1737 }
1738 EXPORT_SYMBOL_GPL(xprt_alloc_slot);
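
/*
 * Illustrative sketch (not part of the original file): transports do not
 * usually call xprt_alloc_slot()/xprt_free_slot() directly; they publish
 * them through their rpc_xprt_ops vector and the generic code invokes
 * xprt->ops->alloc_slot(). The abridged, hypothetical ops table below
 * shows only the two callbacks relevant here.
 */
#if 0
static const struct rpc_xprt_ops example_xprt_ops = {
	.alloc_slot	= xprt_alloc_slot,
	.free_slot	= xprt_free_slot,
	/* ... the remaining mandatory callbacks are elided ... */
};
#endif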

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

static DEFINE_IDA(rpc_xprt_ids);

void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_simple_remove(&rpc_xprt_ids, xprt->id);
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_alloc_id(xprt);
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
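
/*
 * Illustrative sketch (not part of the original file): a transport's
 * setup routine typically embeds struct rpc_xprt at the head of its own
 * private structure and lets xprt_alloc() size the whole allocation.
 * struct example_xprt and example_setup() are hypothetical names, and
 * the slot-table bounds shown are only plausible defaults.
 */
#if 0
struct example_xprt {
	struct rpc_xprt	xprt;	/* must be first for container_of() */
	/* ... transport-private state ... */
};

static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
			  RPC_DEF_SLOT_TABLE, RPC_MAX_SLOT_TABLE);
	if (!xprt)
		return ERR_PTR(-ENOMEM);
	/* ... fill in xprt->ops, timeouts, and addresses ... */
	return xprt;
}
#endif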

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}
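
/*
 * Illustrative sketch (not part of the original file): the XID minted by
 * xprt_alloc_xid() is what a receive path later uses to match a reply to
 * its request via xprt_lookup_rqst(). The fragment below is a
 * hypothetical, simplified receive handler; example_data_ready() and the
 * elided copy step are assumptions, not code from this file.
 */
#if 0
static void example_data_ready(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req) {
		spin_unlock(&xprt->queue_lock);
		return;			/* stale or duplicate reply */
	}
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);

	/* ... copy @copied bytes of reply data into req->rq_rcv_buf ... */

	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->queue_lock);
}
#endif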

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}
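
/*
 * Illustrative sketch (not part of the original file): the RPC client
 * state machine calls xprt_reserve() first and switches to
 * xprt_retry_reserve() when the slot request must be retried, for
 * example after the task waited on the backlog. The fragment below is a
 * hypothetical, simplified rendering of such a state-machine step;
 * example_reserve_step() is not a real helper in this file.
 */
#if 0
static void example_reserve_step(struct rpc_task *task, bool retrying)
{
	if (retrying)
		xprt_retry_reserve(task);	/* ignores XPRT_CONGESTED */
	else
		xprt_reserve(task);		/* may queue on the backlog */
	if (task->tk_status == -EAGAIN)
		return;		/* task sleeps; this step is re-run later */
	/* ... on success, task->tk_rqstp now points to the slot ... */
}
#endif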

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}
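
/*
 * Illustrative sketch (not part of the original file): a caller such as
 * rpc_create() fills in struct xprt_create before handing it to
 * xprt_create_transport(). The wrapper below is hypothetical; only the
 * fields shown are assumed, and example_create_tcp() is not a real
 * helper in this file.
 */
#if 0
static struct rpc_xprt *example_create_tcp(struct net *net,
					   struct sockaddr *addr,
					   size_t addrlen,
					   const char *servername)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= addr,
		.addrlen	= addrlen,
		.servername	= servername,
	};

	return xprt_create_transport(&args);
}
#endif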

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared.  We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc. from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
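
/*
 * Illustrative sketch (not part of the original file): callers that may
 * race with transport destruction take a reference with xprt_get() and
 * touch the transport only if that succeeded. example_use_xprt() is a
 * hypothetical name.
 */
#if 0
static void example_use_xprt(struct rpc_xprt *candidate)
{
	struct rpc_xprt *xprt = xprt_get(candidate);

	if (!xprt)
		return;		/* last reference already dropped */
	/* ... it is now safe to dereference @xprt ... */
	xprt_put(xprt);
}
#endif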