• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/net/sunrpc/xprt.c
4  *
5  *  This is a generic RPC call interface supporting congestion avoidance,
6  *  and asynchronous calls.
7  *
8  *  The interface works like this:
9  *
10  *  -	When a process places a call, it allocates a request slot if
11  *	one is available. Otherwise, it sleeps on the backlog queue
12  *	(xprt_reserve).
13  *  -	Next, the caller puts together the RPC message, stuffs it into
14  *	the request struct, and calls xprt_transmit().
15  *  -	xprt_transmit sends the message and installs the caller on the
16  *	transport's wait list. At the same time, if a reply is expected,
17  *	it installs a timer that is run after the packet's timeout has
18  *	expired.
19  *  -	When a packet arrives, the data_ready handler walks the list of
20  *	pending requests for that transport. If a matching XID is found, the
21  *	caller is woken up, and the timer removed.
22  *  -	When no reply arrives within the timeout interval, the timer is
23  *	fired by the kernel and runs xprt_timer(). It either adjusts the
24  *	timeout values (minor timeout) or wakes up the caller with a status
25  *	of -ETIMEDOUT.
26  *  -	When the caller receives a notification from RPC that a reply arrived,
27  *	it should release the RPC slot, and process the reply.
28  *	If the call timed out, it may choose to retry the operation by
29  *	adjusting the initial timeout value, and simply calling rpc_call
30  *	again.
31  *
32  *  Support for async RPC is done through a set of RPC-specific scheduling
33  *  primitives that `transparently' work for processes as well as async
34  *  tasks that rely on callbacks.
35  *
36  *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
37  *
38  *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
39  */
40 
41 #include <linux/module.h>
42 
43 #include <linux/types.h>
44 #include <linux/interrupt.h>
45 #include <linux/workqueue.h>
46 #include <linux/net.h>
47 #include <linux/ktime.h>
48 
49 #include <linux/sunrpc/clnt.h>
50 #include <linux/sunrpc/metrics.h>
51 #include <linux/sunrpc/bc_xprt.h>
52 #include <linux/rcupdate.h>
53 #include <linux/sched/mm.h>
54 
55 #include <trace/events/sunrpc.h>
56 
57 #include "sunrpc.h"
58 
59 /*
60  * Local variables
61  */
62 
63 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
64 # define RPCDBG_FACILITY	RPCDBG_XPRT
65 #endif
66 
67 /*
68  * Local functions
69  */
70 static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
71 static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
72 static void	 xprt_destroy(struct rpc_xprt *xprt);
73 static void	 xprt_request_init(struct rpc_task *task);
74 
75 static DEFINE_SPINLOCK(xprt_list_lock);
76 static LIST_HEAD(xprt_list);
77 
xprt_request_timeout(const struct rpc_rqst * req)78 static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
79 {
80 	unsigned long timeout = jiffies + req->rq_timeout;
81 
82 	if (time_before(timeout, req->rq_majortimeo))
83 		return timeout;
84 	return req->rq_majortimeo;
85 }
86 
87 /**
88  * xprt_register_transport - register a transport implementation
89  * @transport: transport to register
90  *
91  * If a transport implementation is loaded as a kernel module, it can
92  * call this interface to make itself known to the RPC client.
93  *
94  * Returns:
95  * 0:		transport successfully registered
96  * -EEXIST:	transport already registered
97  * -EINVAL:	transport module being unloaded
98  */
xprt_register_transport(struct xprt_class * transport)99 int xprt_register_transport(struct xprt_class *transport)
100 {
101 	struct xprt_class *t;
102 	int result;
103 
104 	result = -EEXIST;
105 	spin_lock(&xprt_list_lock);
106 	list_for_each_entry(t, &xprt_list, list) {
107 		/* don't register the same transport class twice */
108 		if (t->ident == transport->ident)
109 			goto out;
110 	}
111 
112 	list_add_tail(&transport->list, &xprt_list);
113 	printk(KERN_INFO "RPC: Registered %s transport module.\n",
114 	       transport->name);
115 	result = 0;
116 
117 out:
118 	spin_unlock(&xprt_list_lock);
119 	return result;
120 }
121 EXPORT_SYMBOL_GPL(xprt_register_transport);
122 
123 /**
124  * xprt_unregister_transport - unregister a transport implementation
125  * @transport: transport to unregister
126  *
127  * Returns:
128  * 0:		transport successfully unregistered
129  * -ENOENT:	transport never registered
130  */
xprt_unregister_transport(struct xprt_class * transport)131 int xprt_unregister_transport(struct xprt_class *transport)
132 {
133 	struct xprt_class *t;
134 	int result;
135 
136 	result = 0;
137 	spin_lock(&xprt_list_lock);
138 	list_for_each_entry(t, &xprt_list, list) {
139 		if (t == transport) {
140 			printk(KERN_INFO
141 				"RPC: Unregistered %s transport module.\n",
142 				transport->name);
143 			list_del_init(&transport->list);
144 			goto out;
145 		}
146 	}
147 	result = -ENOENT;
148 
149 out:
150 	spin_unlock(&xprt_list_lock);
151 	return result;
152 }
153 EXPORT_SYMBOL_GPL(xprt_unregister_transport);
154 
155 static void
xprt_class_release(const struct xprt_class * t)156 xprt_class_release(const struct xprt_class *t)
157 {
158 	module_put(t->owner);
159 }
160 
161 static const struct xprt_class *
xprt_class_find_by_netid_locked(const char * netid)162 xprt_class_find_by_netid_locked(const char *netid)
163 {
164 	const struct xprt_class *t;
165 	unsigned int i;
166 
167 	list_for_each_entry(t, &xprt_list, list) {
168 		for (i = 0; t->netid[i][0] != '\0'; i++) {
169 			if (strcmp(t->netid[i], netid) != 0)
170 				continue;
171 			if (!try_module_get(t->owner))
172 				continue;
173 			return t;
174 		}
175 	}
176 	return NULL;
177 }
178 
179 static const struct xprt_class *
xprt_class_find_by_netid(const char * netid)180 xprt_class_find_by_netid(const char *netid)
181 {
182 	const struct xprt_class *t;
183 
184 	spin_lock(&xprt_list_lock);
185 	t = xprt_class_find_by_netid_locked(netid);
186 	if (!t) {
187 		spin_unlock(&xprt_list_lock);
188 		request_module("rpc%s", netid);
189 		spin_lock(&xprt_list_lock);
190 		t = xprt_class_find_by_netid_locked(netid);
191 	}
192 	spin_unlock(&xprt_list_lock);
193 	return t;
194 }
195 
196 /**
197  * xprt_load_transport - load a transport implementation
198  * @netid: transport to load
199  *
200  * Returns:
201  * 0:		transport successfully loaded
202  * -ENOENT:	transport module not available
203  */
xprt_load_transport(const char * netid)204 int xprt_load_transport(const char *netid)
205 {
206 	const struct xprt_class *t;
207 
208 	t = xprt_class_find_by_netid(netid);
209 	if (!t)
210 		return -ENOENT;
211 	xprt_class_release(t);
212 	return 0;
213 }
214 EXPORT_SYMBOL_GPL(xprt_load_transport);
215 
xprt_clear_locked(struct rpc_xprt * xprt)216 static void xprt_clear_locked(struct rpc_xprt *xprt)
217 {
218 	xprt->snd_task = NULL;
219 	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
220 		smp_mb__before_atomic();
221 		clear_bit(XPRT_LOCKED, &xprt->state);
222 		smp_mb__after_atomic();
223 	} else
224 		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
225 }
226 
227 /**
228  * xprt_reserve_xprt - serialize write access to transports
229  * @task: task that is requesting access to the transport
230  * @xprt: pointer to the target transport
231  *
232  * This prevents mixing the payload of separate requests, and prevents
233  * transport connects from colliding with writes.  No congestion control
234  * is provided.
235  */
xprt_reserve_xprt(struct rpc_xprt * xprt,struct rpc_task * task)236 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
237 {
238 	struct rpc_rqst *req = task->tk_rqstp;
239 
240 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
241 		if (task == xprt->snd_task)
242 			goto out_locked;
243 		goto out_sleep;
244 	}
245 	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
246 		goto out_unlock;
247 	xprt->snd_task = task;
248 
249 out_locked:
250 	trace_xprt_reserve_xprt(xprt, task);
251 	return 1;
252 
253 out_unlock:
254 	xprt_clear_locked(xprt);
255 out_sleep:
256 	task->tk_status = -EAGAIN;
257 	if  (RPC_IS_SOFT(task))
258 		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
259 				xprt_request_timeout(req));
260 	else
261 		rpc_sleep_on(&xprt->sending, task, NULL);
262 	return 0;
263 }
264 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
265 
266 static bool
xprt_need_congestion_window_wait(struct rpc_xprt * xprt)267 xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
268 {
269 	return test_bit(XPRT_CWND_WAIT, &xprt->state);
270 }
271 
272 static void
xprt_set_congestion_window_wait(struct rpc_xprt * xprt)273 xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
274 {
275 	if (!list_empty(&xprt->xmit_queue)) {
276 		/* Peek at head of queue to see if it can make progress */
277 		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
278 					rq_xmit)->rq_cong)
279 			return;
280 	}
281 	set_bit(XPRT_CWND_WAIT, &xprt->state);
282 }
283 
284 static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt * xprt)285 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
286 {
287 	if (!RPCXPRT_CONGESTED(xprt))
288 		clear_bit(XPRT_CWND_WAIT, &xprt->state);
289 }
290 
291 /*
292  * xprt_reserve_xprt_cong - serialize write access to transports
293  * @task: task that is requesting access to the transport
294  *
295  * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
296  * integrated into the decision of whether a request is allowed to be
297  * woken up and given access to the transport.
298  * Note that the lock is only granted if we know there are free slots.
299  */
xprt_reserve_xprt_cong(struct rpc_xprt * xprt,struct rpc_task * task)300 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
301 {
302 	struct rpc_rqst *req = task->tk_rqstp;
303 
304 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
305 		if (task == xprt->snd_task)
306 			goto out_locked;
307 		goto out_sleep;
308 	}
309 	if (req == NULL) {
310 		xprt->snd_task = task;
311 		goto out_locked;
312 	}
313 	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
314 		goto out_unlock;
315 	if (!xprt_need_congestion_window_wait(xprt)) {
316 		xprt->snd_task = task;
317 		goto out_locked;
318 	}
319 out_unlock:
320 	xprt_clear_locked(xprt);
321 out_sleep:
322 	task->tk_status = -EAGAIN;
323 	if (RPC_IS_SOFT(task))
324 		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
325 				xprt_request_timeout(req));
326 	else
327 		rpc_sleep_on(&xprt->sending, task, NULL);
328 	return 0;
329 out_locked:
330 	trace_xprt_reserve_cong(xprt, task);
331 	return 1;
332 }
333 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
334 
xprt_lock_write(struct rpc_xprt * xprt,struct rpc_task * task)335 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
336 {
337 	int retval;
338 
339 	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
340 		return 1;
341 	spin_lock(&xprt->transport_lock);
342 	retval = xprt->ops->reserve_xprt(xprt, task);
343 	spin_unlock(&xprt->transport_lock);
344 	return retval;
345 }
346 
__xprt_lock_write_func(struct rpc_task * task,void * data)347 static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
348 {
349 	struct rpc_xprt *xprt = data;
350 
351 	xprt->snd_task = task;
352 	return true;
353 }
354 
__xprt_lock_write_next(struct rpc_xprt * xprt)355 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
356 {
357 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
358 		return;
359 	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
360 		goto out_unlock;
361 	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
362 				__xprt_lock_write_func, xprt))
363 		return;
364 out_unlock:
365 	xprt_clear_locked(xprt);
366 }
367 
__xprt_lock_write_next_cong(struct rpc_xprt * xprt)368 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
369 {
370 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
371 		return;
372 	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
373 		goto out_unlock;
374 	if (xprt_need_congestion_window_wait(xprt))
375 		goto out_unlock;
376 	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
377 				__xprt_lock_write_func, xprt))
378 		return;
379 out_unlock:
380 	xprt_clear_locked(xprt);
381 }
382 
383 /**
384  * xprt_release_xprt - allow other requests to use a transport
385  * @xprt: transport with other tasks potentially waiting
386  * @task: task that is releasing access to the transport
387  *
388  * Note that "task" can be NULL.  No congestion control is provided.
389  */
xprt_release_xprt(struct rpc_xprt * xprt,struct rpc_task * task)390 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
391 {
392 	if (xprt->snd_task == task) {
393 		xprt_clear_locked(xprt);
394 		__xprt_lock_write_next(xprt);
395 	}
396 	trace_xprt_release_xprt(xprt, task);
397 }
398 EXPORT_SYMBOL_GPL(xprt_release_xprt);
399 
400 /**
401  * xprt_release_xprt_cong - allow other requests to use a transport
402  * @xprt: transport with other tasks potentially waiting
403  * @task: task that is releasing access to the transport
404  *
405  * Note that "task" can be NULL.  Another task is awoken to use the
406  * transport if the transport's congestion window allows it.
407  */
xprt_release_xprt_cong(struct rpc_xprt * xprt,struct rpc_task * task)408 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
409 {
410 	if (xprt->snd_task == task) {
411 		xprt_clear_locked(xprt);
412 		__xprt_lock_write_next_cong(xprt);
413 	}
414 	trace_xprt_release_cong(xprt, task);
415 }
416 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
417 
xprt_release_write(struct rpc_xprt * xprt,struct rpc_task * task)418 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
419 {
420 	if (xprt->snd_task != task)
421 		return;
422 	spin_lock(&xprt->transport_lock);
423 	xprt->ops->release_xprt(xprt, task);
424 	spin_unlock(&xprt->transport_lock);
425 }
426 
427 /*
428  * Van Jacobson congestion avoidance. Check if the congestion window
429  * overflowed. Put the task to sleep if this is the case.
430  */
431 static int
__xprt_get_cong(struct rpc_xprt * xprt,struct rpc_rqst * req)432 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
433 {
434 	if (req->rq_cong)
435 		return 1;
436 	trace_xprt_get_cong(xprt, req->rq_task);
437 	if (RPCXPRT_CONGESTED(xprt)) {
438 		xprt_set_congestion_window_wait(xprt);
439 		return 0;
440 	}
441 	req->rq_cong = 1;
442 	xprt->cong += RPC_CWNDSCALE;
443 	return 1;
444 }
445 
446 /*
447  * Adjust the congestion window, and wake up the next task
448  * that has been sleeping due to congestion
449  */
450 static void
__xprt_put_cong(struct rpc_xprt * xprt,struct rpc_rqst * req)451 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
452 {
453 	if (!req->rq_cong)
454 		return;
455 	req->rq_cong = 0;
456 	xprt->cong -= RPC_CWNDSCALE;
457 	xprt_test_and_clear_congestion_window_wait(xprt);
458 	trace_xprt_put_cong(xprt, req->rq_task);
459 	__xprt_lock_write_next_cong(xprt);
460 }
461 
462 /**
463  * xprt_request_get_cong - Request congestion control credits
464  * @xprt: pointer to transport
465  * @req: pointer to RPC request
466  *
467  * Useful for transports that require congestion control.
468  */
469 bool
xprt_request_get_cong(struct rpc_xprt * xprt,struct rpc_rqst * req)470 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
471 {
472 	bool ret = false;
473 
474 	if (req->rq_cong)
475 		return true;
476 	spin_lock(&xprt->transport_lock);
477 	ret = __xprt_get_cong(xprt, req) != 0;
478 	spin_unlock(&xprt->transport_lock);
479 	return ret;
480 }
481 EXPORT_SYMBOL_GPL(xprt_request_get_cong);
482 
483 /**
484  * xprt_release_rqst_cong - housekeeping when request is complete
485  * @task: RPC request that recently completed
486  *
487  * Useful for transports that require congestion control.
488  */
xprt_release_rqst_cong(struct rpc_task * task)489 void xprt_release_rqst_cong(struct rpc_task *task)
490 {
491 	struct rpc_rqst *req = task->tk_rqstp;
492 
493 	__xprt_put_cong(req->rq_xprt, req);
494 }
495 EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
496 
xprt_clear_congestion_window_wait_locked(struct rpc_xprt * xprt)497 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
498 {
499 	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
500 		__xprt_lock_write_next_cong(xprt);
501 }
502 
503 /*
504  * Clear the congestion window wait flag and wake up the next
505  * entry on xprt->sending
506  */
507 static void
xprt_clear_congestion_window_wait(struct rpc_xprt * xprt)508 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
509 {
510 	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
511 		spin_lock(&xprt->transport_lock);
512 		__xprt_lock_write_next_cong(xprt);
513 		spin_unlock(&xprt->transport_lock);
514 	}
515 }
516 
517 /**
518  * xprt_adjust_cwnd - adjust transport congestion window
519  * @xprt: pointer to xprt
520  * @task: recently completed RPC request used to adjust window
521  * @result: result code of completed RPC request
522  *
523  * The transport code maintains an estimate on the maximum number of out-
524  * standing RPC requests, using a smoothed version of the congestion
525  * avoidance implemented in 44BSD. This is basically the Van Jacobson
526  * congestion algorithm: If a retransmit occurs, the congestion window is
527  * halved; otherwise, it is incremented by 1/cwnd when
528  *
529  *	-	a reply is received and
530  *	-	a full number of requests are outstanding and
531  *	-	the congestion window hasn't been updated recently.
532  */
xprt_adjust_cwnd(struct rpc_xprt * xprt,struct rpc_task * task,int result)533 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
534 {
535 	struct rpc_rqst *req = task->tk_rqstp;
536 	unsigned long cwnd = xprt->cwnd;
537 
538 	if (result >= 0 && cwnd <= xprt->cong) {
539 		/* The (cwnd >> 1) term makes sure
540 		 * the result gets rounded properly. */
541 		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
542 		if (cwnd > RPC_MAXCWND(xprt))
543 			cwnd = RPC_MAXCWND(xprt);
544 		__xprt_lock_write_next_cong(xprt);
545 	} else if (result == -ETIMEDOUT) {
546 		cwnd >>= 1;
547 		if (cwnd < RPC_CWNDSCALE)
548 			cwnd = RPC_CWNDSCALE;
549 	}
550 	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
551 			xprt->cong, xprt->cwnd, cwnd);
552 	xprt->cwnd = cwnd;
553 	__xprt_put_cong(xprt, req);
554 }
555 EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
556 
557 /**
558  * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
559  * @xprt: transport with waiting tasks
560  * @status: result code to plant in each task before waking it
561  *
562  */
xprt_wake_pending_tasks(struct rpc_xprt * xprt,int status)563 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
564 {
565 	if (status < 0)
566 		rpc_wake_up_status(&xprt->pending, status);
567 	else
568 		rpc_wake_up(&xprt->pending);
569 }
570 EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
571 
572 /**
573  * xprt_wait_for_buffer_space - wait for transport output buffer to clear
574  * @xprt: transport
575  *
576  * Note that we only set the timer for the case of RPC_IS_SOFT(), since
577  * we don't in general want to force a socket disconnection due to
578  * an incomplete RPC call transmission.
579  */
xprt_wait_for_buffer_space(struct rpc_xprt * xprt)580 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
581 {
582 	set_bit(XPRT_WRITE_SPACE, &xprt->state);
583 }
584 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
585 
586 static bool
xprt_clear_write_space_locked(struct rpc_xprt * xprt)587 xprt_clear_write_space_locked(struct rpc_xprt *xprt)
588 {
589 	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
590 		__xprt_lock_write_next(xprt);
591 		dprintk("RPC:       write space: waking waiting task on "
592 				"xprt %p\n", xprt);
593 		return true;
594 	}
595 	return false;
596 }
597 
598 /**
599  * xprt_write_space - wake the task waiting for transport output buffer space
600  * @xprt: transport with waiting tasks
601  *
602  * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
603  */
xprt_write_space(struct rpc_xprt * xprt)604 bool xprt_write_space(struct rpc_xprt *xprt)
605 {
606 	bool ret;
607 
608 	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
609 		return false;
610 	spin_lock(&xprt->transport_lock);
611 	ret = xprt_clear_write_space_locked(xprt);
612 	spin_unlock(&xprt->transport_lock);
613 	return ret;
614 }
615 EXPORT_SYMBOL_GPL(xprt_write_space);
616 
xprt_abs_ktime_to_jiffies(ktime_t abstime)617 static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
618 {
619 	s64 delta = ktime_to_ns(ktime_get() - abstime);
620 	return likely(delta >= 0) ?
621 		jiffies - nsecs_to_jiffies(delta) :
622 		jiffies + nsecs_to_jiffies(-delta);
623 }
624 
xprt_calc_majortimeo(struct rpc_rqst * req)625 static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
626 {
627 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
628 	unsigned long majortimeo = req->rq_timeout;
629 
630 	if (to->to_exponential)
631 		majortimeo <<= to->to_retries;
632 	else
633 		majortimeo += to->to_increment * to->to_retries;
634 	if (majortimeo > to->to_maxval || majortimeo == 0)
635 		majortimeo = to->to_maxval;
636 	return majortimeo;
637 }
638 
xprt_reset_majortimeo(struct rpc_rqst * req)639 static void xprt_reset_majortimeo(struct rpc_rqst *req)
640 {
641 	req->rq_majortimeo += xprt_calc_majortimeo(req);
642 }
643 
xprt_reset_minortimeo(struct rpc_rqst * req)644 static void xprt_reset_minortimeo(struct rpc_rqst *req)
645 {
646 	req->rq_minortimeo += req->rq_timeout;
647 }
648 
xprt_init_majortimeo(struct rpc_task * task,struct rpc_rqst * req)649 static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
650 {
651 	unsigned long time_init;
652 	struct rpc_xprt *xprt = req->rq_xprt;
653 
654 	if (likely(xprt && xprt_connected(xprt)))
655 		time_init = jiffies;
656 	else
657 		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
658 	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
659 	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
660 	req->rq_minortimeo = time_init + req->rq_timeout;
661 }
662 
663 /**
664  * xprt_adjust_timeout - adjust timeout values for next retransmit
665  * @req: RPC request containing parameters to use for the adjustment
666  *
667  */
xprt_adjust_timeout(struct rpc_rqst * req)668 int xprt_adjust_timeout(struct rpc_rqst *req)
669 {
670 	struct rpc_xprt *xprt = req->rq_xprt;
671 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
672 	int status = 0;
673 
674 	if (time_before(jiffies, req->rq_majortimeo)) {
675 		if (time_before(jiffies, req->rq_minortimeo))
676 			return status;
677 		if (to->to_exponential)
678 			req->rq_timeout <<= 1;
679 		else
680 			req->rq_timeout += to->to_increment;
681 		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
682 			req->rq_timeout = to->to_maxval;
683 		req->rq_retries++;
684 	} else {
685 		req->rq_timeout = to->to_initval;
686 		req->rq_retries = 0;
687 		xprt_reset_majortimeo(req);
688 		/* Reset the RTT counters == "slow start" */
689 		spin_lock(&xprt->transport_lock);
690 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
691 		spin_unlock(&xprt->transport_lock);
692 		status = -ETIMEDOUT;
693 	}
694 	xprt_reset_minortimeo(req);
695 
696 	if (req->rq_timeout == 0) {
697 		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
698 		req->rq_timeout = 5 * HZ;
699 	}
700 	return status;
701 }
702 
xprt_autoclose(struct work_struct * work)703 static void xprt_autoclose(struct work_struct *work)
704 {
705 	struct rpc_xprt *xprt =
706 		container_of(work, struct rpc_xprt, task_cleanup);
707 	unsigned int pflags = memalloc_nofs_save();
708 
709 	trace_xprt_disconnect_auto(xprt);
710 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
711 	xprt->ops->close(xprt);
712 	xprt_release_write(xprt, NULL);
713 	wake_up_bit(&xprt->state, XPRT_LOCKED);
714 	memalloc_nofs_restore(pflags);
715 }
716 
717 /**
718  * xprt_disconnect_done - mark a transport as disconnected
719  * @xprt: transport to flag for disconnect
720  *
721  */
xprt_disconnect_done(struct rpc_xprt * xprt)722 void xprt_disconnect_done(struct rpc_xprt *xprt)
723 {
724 	trace_xprt_disconnect_done(xprt);
725 	spin_lock(&xprt->transport_lock);
726 	xprt_clear_connected(xprt);
727 	xprt_clear_write_space_locked(xprt);
728 	xprt_clear_congestion_window_wait_locked(xprt);
729 	xprt_wake_pending_tasks(xprt, -ENOTCONN);
730 	spin_unlock(&xprt->transport_lock);
731 }
732 EXPORT_SYMBOL_GPL(xprt_disconnect_done);
733 
734 /**
735  * xprt_force_disconnect - force a transport to disconnect
736  * @xprt: transport to disconnect
737  *
738  */
xprt_force_disconnect(struct rpc_xprt * xprt)739 void xprt_force_disconnect(struct rpc_xprt *xprt)
740 {
741 	trace_xprt_disconnect_force(xprt);
742 
743 	/* Don't race with the test_bit() in xprt_clear_locked() */
744 	spin_lock(&xprt->transport_lock);
745 	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
746 	/* Try to schedule an autoclose RPC call */
747 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
748 		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
749 	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
750 		rpc_wake_up_queued_task_set_status(&xprt->pending,
751 						   xprt->snd_task, -ENOTCONN);
752 	spin_unlock(&xprt->transport_lock);
753 }
754 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
755 
756 static unsigned int
xprt_connect_cookie(struct rpc_xprt * xprt)757 xprt_connect_cookie(struct rpc_xprt *xprt)
758 {
759 	return READ_ONCE(xprt->connect_cookie);
760 }
761 
762 static bool
xprt_request_retransmit_after_disconnect(struct rpc_task * task)763 xprt_request_retransmit_after_disconnect(struct rpc_task *task)
764 {
765 	struct rpc_rqst *req = task->tk_rqstp;
766 	struct rpc_xprt *xprt = req->rq_xprt;
767 
768 	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
769 		!xprt_connected(xprt);
770 }
771 
772 /**
773  * xprt_conditional_disconnect - force a transport to disconnect
774  * @xprt: transport to disconnect
775  * @cookie: 'connection cookie'
776  *
777  * This attempts to break the connection if and only if 'cookie' matches
778  * the current transport 'connection cookie'. It ensures that we don't
779  * try to break the connection more than once when we need to retransmit
780  * a batch of RPC requests.
781  *
782  */
xprt_conditional_disconnect(struct rpc_xprt * xprt,unsigned int cookie)783 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
784 {
785 	/* Don't race with the test_bit() in xprt_clear_locked() */
786 	spin_lock(&xprt->transport_lock);
787 	if (cookie != xprt->connect_cookie)
788 		goto out;
789 	if (test_bit(XPRT_CLOSING, &xprt->state))
790 		goto out;
791 	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
792 	/* Try to schedule an autoclose RPC call */
793 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
794 		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
795 	xprt_wake_pending_tasks(xprt, -EAGAIN);
796 out:
797 	spin_unlock(&xprt->transport_lock);
798 }
799 
800 static bool
xprt_has_timer(const struct rpc_xprt * xprt)801 xprt_has_timer(const struct rpc_xprt *xprt)
802 {
803 	return xprt->idle_timeout != 0;
804 }
805 
806 static void
xprt_schedule_autodisconnect(struct rpc_xprt * xprt)807 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
808 	__must_hold(&xprt->transport_lock)
809 {
810 	xprt->last_used = jiffies;
811 	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
812 		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
813 }
814 
815 static void
xprt_init_autodisconnect(struct timer_list * t)816 xprt_init_autodisconnect(struct timer_list *t)
817 {
818 	struct rpc_xprt *xprt = from_timer(xprt, t, timer);
819 
820 	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
821 		return;
822 	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
823 	xprt->last_used = jiffies;
824 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
825 		return;
826 	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
827 }
828 
xprt_lock_connect(struct rpc_xprt * xprt,struct rpc_task * task,void * cookie)829 bool xprt_lock_connect(struct rpc_xprt *xprt,
830 		struct rpc_task *task,
831 		void *cookie)
832 {
833 	bool ret = false;
834 
835 	spin_lock(&xprt->transport_lock);
836 	if (!test_bit(XPRT_LOCKED, &xprt->state))
837 		goto out;
838 	if (xprt->snd_task != task)
839 		goto out;
840 	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
841 	xprt->snd_task = cookie;
842 	ret = true;
843 out:
844 	spin_unlock(&xprt->transport_lock);
845 	return ret;
846 }
847 EXPORT_SYMBOL_GPL(xprt_lock_connect);
848 
xprt_unlock_connect(struct rpc_xprt * xprt,void * cookie)849 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
850 {
851 	spin_lock(&xprt->transport_lock);
852 	if (xprt->snd_task != cookie)
853 		goto out;
854 	if (!test_bit(XPRT_LOCKED, &xprt->state))
855 		goto out;
856 	xprt->snd_task =NULL;
857 	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
858 	xprt->ops->release_xprt(xprt, NULL);
859 	xprt_schedule_autodisconnect(xprt);
860 out:
861 	spin_unlock(&xprt->transport_lock);
862 	wake_up_bit(&xprt->state, XPRT_LOCKED);
863 }
864 EXPORT_SYMBOL_GPL(xprt_unlock_connect);
865 
866 /**
867  * xprt_connect - schedule a transport connect operation
868  * @task: RPC task that is requesting the connect
869  *
870  */
xprt_connect(struct rpc_task * task)871 void xprt_connect(struct rpc_task *task)
872 {
873 	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
874 
875 	trace_xprt_connect(xprt);
876 
877 	if (!xprt_bound(xprt)) {
878 		task->tk_status = -EAGAIN;
879 		return;
880 	}
881 	if (!xprt_lock_write(xprt, task))
882 		return;
883 
884 	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
885 		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
886 		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
887 				xprt_request_timeout(task->tk_rqstp));
888 
889 		if (test_bit(XPRT_CLOSING, &xprt->state))
890 			return;
891 		if (xprt_test_and_set_connecting(xprt))
892 			return;
893 		/* Race breaker */
894 		if (!xprt_connected(xprt)) {
895 			xprt->stat.connect_start = jiffies;
896 			xprt->ops->connect(xprt, task);
897 		} else {
898 			xprt_clear_connecting(xprt);
899 			task->tk_status = 0;
900 			rpc_wake_up_queued_task(&xprt->pending, task);
901 		}
902 	}
903 	xprt_release_write(xprt, task);
904 }
905 
906 /**
907  * xprt_reconnect_delay - compute the wait before scheduling a connect
908  * @xprt: transport instance
909  *
910  */
xprt_reconnect_delay(const struct rpc_xprt * xprt)911 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
912 {
913 	unsigned long start, now = jiffies;
914 
915 	start = xprt->stat.connect_start + xprt->reestablish_timeout;
916 	if (time_after(start, now))
917 		return start - now;
918 	return 0;
919 }
920 EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
921 
922 /**
923  * xprt_reconnect_backoff - compute the new re-establish timeout
924  * @xprt: transport instance
925  * @init_to: initial reestablish timeout
926  *
927  */
xprt_reconnect_backoff(struct rpc_xprt * xprt,unsigned long init_to)928 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
929 {
930 	xprt->reestablish_timeout <<= 1;
931 	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
932 		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
933 	if (xprt->reestablish_timeout < init_to)
934 		xprt->reestablish_timeout = init_to;
935 }
936 EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
937 
938 enum xprt_xid_rb_cmp {
939 	XID_RB_EQUAL,
940 	XID_RB_LEFT,
941 	XID_RB_RIGHT,
942 };
943 static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1,__be32 xid2)944 xprt_xid_cmp(__be32 xid1, __be32 xid2)
945 {
946 	if (xid1 == xid2)
947 		return XID_RB_EQUAL;
948 	if ((__force u32)xid1 < (__force u32)xid2)
949 		return XID_RB_LEFT;
950 	return XID_RB_RIGHT;
951 }
952 
953 static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt * xprt,__be32 xid)954 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
955 {
956 	struct rb_node *n = xprt->recv_queue.rb_node;
957 	struct rpc_rqst *req;
958 
959 	while (n != NULL) {
960 		req = rb_entry(n, struct rpc_rqst, rq_recv);
961 		switch (xprt_xid_cmp(xid, req->rq_xid)) {
962 		case XID_RB_LEFT:
963 			n = n->rb_left;
964 			break;
965 		case XID_RB_RIGHT:
966 			n = n->rb_right;
967 			break;
968 		case XID_RB_EQUAL:
969 			return req;
970 		}
971 	}
972 	return NULL;
973 }
974 
975 static void
xprt_request_rb_insert(struct rpc_xprt * xprt,struct rpc_rqst * new)976 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
977 {
978 	struct rb_node **p = &xprt->recv_queue.rb_node;
979 	struct rb_node *n = NULL;
980 	struct rpc_rqst *req;
981 
982 	while (*p != NULL) {
983 		n = *p;
984 		req = rb_entry(n, struct rpc_rqst, rq_recv);
985 		switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
986 		case XID_RB_LEFT:
987 			p = &n->rb_left;
988 			break;
989 		case XID_RB_RIGHT:
990 			p = &n->rb_right;
991 			break;
992 		case XID_RB_EQUAL:
993 			WARN_ON_ONCE(new != req);
994 			return;
995 		}
996 	}
997 	rb_link_node(&new->rq_recv, n, p);
998 	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
999 }
1000 
1001 static void
xprt_request_rb_remove(struct rpc_xprt * xprt,struct rpc_rqst * req)1002 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1003 {
1004 	rb_erase(&req->rq_recv, &xprt->recv_queue);
1005 }
1006 
1007 /**
1008  * xprt_lookup_rqst - find an RPC request corresponding to an XID
1009  * @xprt: transport on which the original request was transmitted
1010  * @xid: RPC XID of incoming reply
1011  *
1012  * Caller holds xprt->queue_lock.
1013  */
xprt_lookup_rqst(struct rpc_xprt * xprt,__be32 xid)1014 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1015 {
1016 	struct rpc_rqst *entry;
1017 
1018 	entry = xprt_request_rb_find(xprt, xid);
1019 	if (entry != NULL) {
1020 		trace_xprt_lookup_rqst(xprt, xid, 0);
1021 		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
1022 		return entry;
1023 	}
1024 
1025 	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
1026 			ntohl(xid));
1027 	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1028 	xprt->stat.bad_xids++;
1029 	return NULL;
1030 }
1031 EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
1032 
1033 static bool
xprt_is_pinned_rqst(struct rpc_rqst * req)1034 xprt_is_pinned_rqst(struct rpc_rqst *req)
1035 {
1036 	return atomic_read(&req->rq_pin) != 0;
1037 }
1038 
1039 /**
1040  * xprt_pin_rqst - Pin a request on the transport receive list
1041  * @req: Request to pin
1042  *
1043  * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
1044  * so should be holding xprt->queue_lock.
1045  */
xprt_pin_rqst(struct rpc_rqst * req)1046 void xprt_pin_rqst(struct rpc_rqst *req)
1047 {
1048 	atomic_inc(&req->rq_pin);
1049 }
1050 EXPORT_SYMBOL_GPL(xprt_pin_rqst);
1051 
1052 /**
1053  * xprt_unpin_rqst - Unpin a request on the transport receive list
1054  * @req: Request to pin
1055  *
1056  * Caller should be holding xprt->queue_lock.
1057  */
xprt_unpin_rqst(struct rpc_rqst * req)1058 void xprt_unpin_rqst(struct rpc_rqst *req)
1059 {
1060 	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
1061 		atomic_dec(&req->rq_pin);
1062 		return;
1063 	}
1064 	if (atomic_dec_and_test(&req->rq_pin))
1065 		wake_up_var(&req->rq_pin);
1066 }
1067 EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
1068 
xprt_wait_on_pinned_rqst(struct rpc_rqst * req)1069 static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
1070 {
1071 	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
1072 }
1073 
1074 static bool
xprt_request_data_received(struct rpc_task * task)1075 xprt_request_data_received(struct rpc_task *task)
1076 {
1077 	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1078 		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
1079 }
1080 
1081 static bool
xprt_request_need_enqueue_receive(struct rpc_task * task,struct rpc_rqst * req)1082 xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
1083 {
1084 	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1085 		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
1086 }
1087 
1088 /**
1089  * xprt_request_enqueue_receive - Add an request to the receive queue
1090  * @task: RPC task
1091  *
1092  */
1093 void
xprt_request_enqueue_receive(struct rpc_task * task)1094 xprt_request_enqueue_receive(struct rpc_task *task)
1095 {
1096 	struct rpc_rqst *req = task->tk_rqstp;
1097 	struct rpc_xprt *xprt = req->rq_xprt;
1098 
1099 	if (!xprt_request_need_enqueue_receive(task, req))
1100 		return;
1101 
1102 	xprt_request_prepare(task->tk_rqstp);
1103 	spin_lock(&xprt->queue_lock);
1104 
1105 	/* Update the softirq receive buffer */
1106 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1107 			sizeof(req->rq_private_buf));
1108 
1109 	/* Add request to the receive list */
1110 	xprt_request_rb_insert(xprt, req);
1111 	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1112 	spin_unlock(&xprt->queue_lock);
1113 
1114 	/* Turn off autodisconnect */
1115 	del_singleshot_timer_sync(&xprt->timer);
1116 }
1117 
1118 /**
1119  * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1120  * @task: RPC task
1121  *
1122  * Caller must hold xprt->queue_lock.
1123  */
1124 static void
xprt_request_dequeue_receive_locked(struct rpc_task * task)1125 xprt_request_dequeue_receive_locked(struct rpc_task *task)
1126 {
1127 	struct rpc_rqst *req = task->tk_rqstp;
1128 
1129 	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1130 		xprt_request_rb_remove(req->rq_xprt, req);
1131 }
1132 
1133 /**
1134  * xprt_update_rtt - Update RPC RTT statistics
1135  * @task: RPC request that recently completed
1136  *
1137  * Caller holds xprt->queue_lock.
1138  */
xprt_update_rtt(struct rpc_task * task)1139 void xprt_update_rtt(struct rpc_task *task)
1140 {
1141 	struct rpc_rqst *req = task->tk_rqstp;
1142 	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1143 	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1144 	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1145 
1146 	if (timer) {
1147 		if (req->rq_ntrans == 1)
1148 			rpc_update_rtt(rtt, timer, m);
1149 		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1150 	}
1151 }
1152 EXPORT_SYMBOL_GPL(xprt_update_rtt);
1153 
1154 /**
1155  * xprt_complete_rqst - called when reply processing is complete
1156  * @task: RPC request that recently completed
1157  * @copied: actual number of bytes received from the transport
1158  *
1159  * Caller holds xprt->queue_lock.
1160  */
xprt_complete_rqst(struct rpc_task * task,int copied)1161 void xprt_complete_rqst(struct rpc_task *task, int copied)
1162 {
1163 	struct rpc_rqst *req = task->tk_rqstp;
1164 	struct rpc_xprt *xprt = req->rq_xprt;
1165 
1166 	xprt->stat.recvs++;
1167 
1168 	req->rq_private_buf.len = copied;
1169 	/* Ensure all writes are done before we update */
1170 	/* req->rq_reply_bytes_recvd */
1171 	smp_wmb();
1172 	req->rq_reply_bytes_recvd = copied;
1173 	xprt_request_dequeue_receive_locked(task);
1174 	rpc_wake_up_queued_task(&xprt->pending, task);
1175 }
1176 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
1177 
xprt_timer(struct rpc_task * task)1178 static void xprt_timer(struct rpc_task *task)
1179 {
1180 	struct rpc_rqst *req = task->tk_rqstp;
1181 	struct rpc_xprt *xprt = req->rq_xprt;
1182 
1183 	if (task->tk_status != -ETIMEDOUT)
1184 		return;
1185 
1186 	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1187 	if (!req->rq_reply_bytes_recvd) {
1188 		if (xprt->ops->timer)
1189 			xprt->ops->timer(xprt, task);
1190 	} else
1191 		task->tk_status = 0;
1192 }
1193 
1194 /**
1195  * xprt_wait_for_reply_request_def - wait for reply
1196  * @task: pointer to rpc_task
1197  *
1198  * Set a request's retransmit timeout based on the transport's
1199  * default timeout parameters.  Used by transports that don't adjust
1200  * the retransmit timeout based on round-trip time estimation,
1201  * and put the task to sleep on the pending queue.
1202  */
xprt_wait_for_reply_request_def(struct rpc_task * task)1203 void xprt_wait_for_reply_request_def(struct rpc_task *task)
1204 {
1205 	struct rpc_rqst *req = task->tk_rqstp;
1206 
1207 	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1208 			xprt_request_timeout(req));
1209 }
1210 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1211 
1212 /**
1213  * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1214  * @task: pointer to rpc_task
1215  *
1216  * Set a request's retransmit timeout using the RTT estimator,
1217  * and put the task to sleep on the pending queue.
1218  */
xprt_wait_for_reply_request_rtt(struct rpc_task * task)1219 void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1220 {
1221 	int timer = task->tk_msg.rpc_proc->p_timer;
1222 	struct rpc_clnt *clnt = task->tk_client;
1223 	struct rpc_rtt *rtt = clnt->cl_rtt;
1224 	struct rpc_rqst *req = task->tk_rqstp;
1225 	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1226 	unsigned long timeout;
1227 
1228 	timeout = rpc_calc_rto(rtt, timer);
1229 	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1230 	if (timeout > max_timeout || timeout == 0)
1231 		timeout = max_timeout;
1232 	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1233 			jiffies + timeout);
1234 }
1235 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1236 
1237 /**
1238  * xprt_request_wait_receive - wait for the reply to an RPC request
1239  * @task: RPC task about to send a request
1240  *
1241  */
xprt_request_wait_receive(struct rpc_task * task)1242 void xprt_request_wait_receive(struct rpc_task *task)
1243 {
1244 	struct rpc_rqst *req = task->tk_rqstp;
1245 	struct rpc_xprt *xprt = req->rq_xprt;
1246 
1247 	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1248 		return;
1249 	/*
1250 	 * Sleep on the pending queue if we're expecting a reply.
1251 	 * The spinlock ensures atomicity between the test of
1252 	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1253 	 */
1254 	spin_lock(&xprt->queue_lock);
1255 	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1256 		xprt->ops->wait_for_reply_request(task);
1257 		/*
1258 		 * Send an extra queue wakeup call if the
1259 		 * connection was dropped in case the call to
1260 		 * rpc_sleep_on() raced.
1261 		 */
1262 		if (xprt_request_retransmit_after_disconnect(task))
1263 			rpc_wake_up_queued_task_set_status(&xprt->pending,
1264 					task, -ENOTCONN);
1265 	}
1266 	spin_unlock(&xprt->queue_lock);
1267 }
1268 
1269 static bool
xprt_request_need_enqueue_transmit(struct rpc_task * task,struct rpc_rqst * req)1270 xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1271 {
1272 	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1273 }
1274 
1275 /**
1276  * xprt_request_enqueue_transmit - queue a task for transmission
1277  * @task: pointer to rpc_task
1278  *
1279  * Add a task to the transmission queue.
1280  */
1281 void
xprt_request_enqueue_transmit(struct rpc_task * task)1282 xprt_request_enqueue_transmit(struct rpc_task *task)
1283 {
1284 	struct rpc_rqst *pos, *req = task->tk_rqstp;
1285 	struct rpc_xprt *xprt = req->rq_xprt;
1286 
1287 	if (xprt_request_need_enqueue_transmit(task, req)) {
1288 		req->rq_bytes_sent = 0;
1289 		spin_lock(&xprt->queue_lock);
1290 		/*
1291 		 * Requests that carry congestion control credits are added
1292 		 * to the head of the list to avoid starvation issues.
1293 		 */
1294 		if (req->rq_cong) {
1295 			xprt_clear_congestion_window_wait(xprt);
1296 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1297 				if (pos->rq_cong)
1298 					continue;
1299 				/* Note: req is added _before_ pos */
1300 				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1301 				INIT_LIST_HEAD(&req->rq_xmit2);
1302 				goto out;
1303 			}
1304 		} else if (RPC_IS_SWAPPER(task)) {
1305 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1306 				if (pos->rq_cong || pos->rq_bytes_sent)
1307 					continue;
1308 				if (RPC_IS_SWAPPER(pos->rq_task))
1309 					continue;
1310 				/* Note: req is added _before_ pos */
1311 				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1312 				INIT_LIST_HEAD(&req->rq_xmit2);
1313 				goto out;
1314 			}
1315 		} else if (!req->rq_seqno) {
1316 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1317 				if (pos->rq_task->tk_owner != task->tk_owner)
1318 					continue;
1319 				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1320 				INIT_LIST_HEAD(&req->rq_xmit);
1321 				goto out;
1322 			}
1323 		}
1324 		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1325 		INIT_LIST_HEAD(&req->rq_xmit2);
1326 out:
1327 		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1328 		spin_unlock(&xprt->queue_lock);
1329 	}
1330 }
1331 
1332 /**
1333  * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1334  * @task: pointer to rpc_task
1335  *
1336  * Remove a task from the transmission queue
1337  * Caller must hold xprt->queue_lock
1338  */
1339 static void
xprt_request_dequeue_transmit_locked(struct rpc_task * task)1340 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1341 {
1342 	struct rpc_rqst *req = task->tk_rqstp;
1343 
1344 	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1345 		return;
1346 	if (!list_empty(&req->rq_xmit)) {
1347 		list_del(&req->rq_xmit);
1348 		if (!list_empty(&req->rq_xmit2)) {
1349 			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1350 					struct rpc_rqst, rq_xmit2);
1351 			list_del(&req->rq_xmit2);
1352 			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1353 		}
1354 	} else
1355 		list_del(&req->rq_xmit2);
1356 }
1357 
1358 /**
1359  * xprt_request_dequeue_transmit - remove a task from the transmission queue
1360  * @task: pointer to rpc_task
1361  *
1362  * Remove a task from the transmission queue
1363  */
1364 static void
xprt_request_dequeue_transmit(struct rpc_task * task)1365 xprt_request_dequeue_transmit(struct rpc_task *task)
1366 {
1367 	struct rpc_rqst *req = task->tk_rqstp;
1368 	struct rpc_xprt *xprt = req->rq_xprt;
1369 
1370 	spin_lock(&xprt->queue_lock);
1371 	xprt_request_dequeue_transmit_locked(task);
1372 	spin_unlock(&xprt->queue_lock);
1373 }
1374 
1375 /**
1376  * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1377  * @task: pointer to rpc_task
1378  *
1379  * Remove a task from the transmit and receive queues, and ensure that
1380  * it is not pinned by the receive work item.
1381  */
1382 void
xprt_request_dequeue_xprt(struct rpc_task * task)1383 xprt_request_dequeue_xprt(struct rpc_task *task)
1384 {
1385 	struct rpc_rqst	*req = task->tk_rqstp;
1386 	struct rpc_xprt *xprt = req->rq_xprt;
1387 
1388 	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1389 	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1390 	    xprt_is_pinned_rqst(req)) {
1391 		spin_lock(&xprt->queue_lock);
1392 		xprt_request_dequeue_transmit_locked(task);
1393 		xprt_request_dequeue_receive_locked(task);
1394 		while (xprt_is_pinned_rqst(req)) {
1395 			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1396 			spin_unlock(&xprt->queue_lock);
1397 			xprt_wait_on_pinned_rqst(req);
1398 			spin_lock(&xprt->queue_lock);
1399 			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1400 		}
1401 		spin_unlock(&xprt->queue_lock);
1402 	}
1403 }
1404 
1405 /**
1406  * xprt_request_prepare - prepare an encoded request for transport
1407  * @req: pointer to rpc_rqst
1408  *
1409  * Calls into the transport layer to do whatever is needed to prepare
1410  * the request for transmission or receive.
1411  */
1412 void
xprt_request_prepare(struct rpc_rqst * req)1413 xprt_request_prepare(struct rpc_rqst *req)
1414 {
1415 	struct rpc_xprt *xprt = req->rq_xprt;
1416 
1417 	if (xprt->ops->prepare_request)
1418 		xprt->ops->prepare_request(req);
1419 }
1420 
1421 /**
1422  * xprt_request_need_retransmit - Test if a task needs retransmission
1423  * @task: pointer to rpc_task
1424  *
1425  * Test for whether a connection breakage requires the task to retransmit
1426  */
1427 bool
xprt_request_need_retransmit(struct rpc_task * task)1428 xprt_request_need_retransmit(struct rpc_task *task)
1429 {
1430 	return xprt_request_retransmit_after_disconnect(task);
1431 }
1432 
1433 /**
1434  * xprt_prepare_transmit - reserve the transport before sending a request
1435  * @task: RPC task about to send a request
1436  *
1437  */
xprt_prepare_transmit(struct rpc_task * task)1438 bool xprt_prepare_transmit(struct rpc_task *task)
1439 {
1440 	struct rpc_rqst	*req = task->tk_rqstp;
1441 	struct rpc_xprt	*xprt = req->rq_xprt;
1442 
1443 	if (!xprt_lock_write(xprt, task)) {
1444 		/* Race breaker: someone may have transmitted us */
1445 		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1446 			rpc_wake_up_queued_task_set_status(&xprt->sending,
1447 					task, 0);
1448 		return false;
1449 
1450 	}
1451 	return true;
1452 }
1453 
xprt_end_transmit(struct rpc_task * task)1454 void xprt_end_transmit(struct rpc_task *task)
1455 {
1456 	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
1457 
1458 	xprt_inject_disconnect(xprt);
1459 	xprt_release_write(xprt, task);
1460 }
1461 
1462 /**
1463  * xprt_request_transmit - send an RPC request on a transport
1464  * @req: pointer to request to transmit
1465  * @snd_task: RPC task that owns the transport lock
1466  *
1467  * This performs the transmission of a single request.
1468  * Note that if the request is not the same as snd_task, then it
1469  * does need to be pinned.
1470  * Returns '0' on success.
1471  */
1472 static int
xprt_request_transmit(struct rpc_rqst * req,struct rpc_task * snd_task)1473 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1474 {
1475 	struct rpc_xprt *xprt = req->rq_xprt;
1476 	struct rpc_task *task = req->rq_task;
1477 	unsigned int connect_cookie;
1478 	int is_retrans = RPC_WAS_SENT(task);
1479 	int status;
1480 
1481 	if (!req->rq_bytes_sent) {
1482 		if (xprt_request_data_received(task)) {
1483 			status = 0;
1484 			goto out_dequeue;
1485 		}
1486 		/* Verify that our message lies in the RPCSEC_GSS window */
1487 		if (rpcauth_xmit_need_reencode(task)) {
1488 			status = -EBADMSG;
1489 			goto out_dequeue;
1490 		}
1491 		if (RPC_SIGNALLED(task)) {
1492 			status = -ERESTARTSYS;
1493 			goto out_dequeue;
1494 		}
1495 	}
1496 
1497 	/*
1498 	 * Update req->rq_ntrans before transmitting to avoid races with
1499 	 * xprt_update_rtt(), which needs to know that it is recording a
1500 	 * reply to the first transmission.
1501 	 */
1502 	req->rq_ntrans++;
1503 
1504 	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1505 	connect_cookie = xprt->connect_cookie;
1506 	status = xprt->ops->send_request(req);
1507 	if (status != 0) {
1508 		req->rq_ntrans--;
1509 		trace_xprt_transmit(req, status);
1510 		return status;
1511 	}
1512 
1513 	if (is_retrans)
1514 		task->tk_client->cl_stats->rpcretrans++;
1515 
1516 	xprt_inject_disconnect(xprt);
1517 
1518 	task->tk_flags |= RPC_TASK_SENT;
1519 	spin_lock(&xprt->transport_lock);
1520 
1521 	xprt->stat.sends++;
1522 	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1523 	xprt->stat.bklog_u += xprt->backlog.qlen;
1524 	xprt->stat.sending_u += xprt->sending.qlen;
1525 	xprt->stat.pending_u += xprt->pending.qlen;
1526 	spin_unlock(&xprt->transport_lock);
1527 
1528 	req->rq_connect_cookie = connect_cookie;
1529 out_dequeue:
1530 	trace_xprt_transmit(req, status);
1531 	xprt_request_dequeue_transmit(task);
1532 	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1533 	return status;
1534 }
1535 
1536 /**
1537  * xprt_transmit - send an RPC request on a transport
1538  * @task: controlling RPC task
1539  *
1540  * Attempts to drain the transmit queue. On exit, either the transport
1541  * signalled an error that needs to be handled before transmission can
1542  * resume, or @task finished transmitting, and detected that it already
1543  * received a reply.
1544  */
1545 void
xprt_transmit(struct rpc_task * task)1546 xprt_transmit(struct rpc_task *task)
1547 {
1548 	struct rpc_rqst *next, *req = task->tk_rqstp;
1549 	struct rpc_xprt	*xprt = req->rq_xprt;
1550 	int status;
1551 
1552 	spin_lock(&xprt->queue_lock);
1553 	for (;;) {
1554 		next = list_first_entry_or_null(&xprt->xmit_queue,
1555 						struct rpc_rqst, rq_xmit);
1556 		if (!next)
1557 			break;
1558 		xprt_pin_rqst(next);
1559 		spin_unlock(&xprt->queue_lock);
1560 		status = xprt_request_transmit(next, task);
1561 		if (status == -EBADMSG && next != req)
1562 			status = 0;
1563 		spin_lock(&xprt->queue_lock);
1564 		xprt_unpin_rqst(next);
1565 		if (status < 0) {
1566 			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1567 				task->tk_status = status;
1568 			break;
1569 		}
1570 		/* Was @task transmitted, and has it received a reply? */
1571 		if (xprt_request_data_received(task) &&
1572 		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1573 			break;
1574 		cond_resched_lock(&xprt->queue_lock);
1575 	}
1576 	spin_unlock(&xprt->queue_lock);
1577 }
1578 
xprt_complete_request_init(struct rpc_task * task)1579 static void xprt_complete_request_init(struct rpc_task *task)
1580 {
1581 	if (task->tk_rqstp)
1582 		xprt_request_init(task);
1583 }
1584 
xprt_add_backlog(struct rpc_xprt * xprt,struct rpc_task * task)1585 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1586 {
1587 	set_bit(XPRT_CONGESTED, &xprt->state);
1588 	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1589 }
1590 EXPORT_SYMBOL_GPL(xprt_add_backlog);
1591 
__xprt_set_rq(struct rpc_task * task,void * data)1592 static bool __xprt_set_rq(struct rpc_task *task, void *data)
1593 {
1594 	struct rpc_rqst *req = data;
1595 
1596 	if (task->tk_rqstp == NULL) {
1597 		memset(req, 0, sizeof(*req));	/* mark unused */
1598 		task->tk_rqstp = req;
1599 		return true;
1600 	}
1601 	return false;
1602 }
1603 
xprt_wake_up_backlog(struct rpc_xprt * xprt,struct rpc_rqst * req)1604 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1605 {
1606 	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1607 		clear_bit(XPRT_CONGESTED, &xprt->state);
1608 		return false;
1609 	}
1610 	return true;
1611 }
1612 EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
1613 
xprt_throttle_congested(struct rpc_xprt * xprt,struct rpc_task * task)1614 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1615 {
1616 	bool ret = false;
1617 
1618 	if (!test_bit(XPRT_CONGESTED, &xprt->state))
1619 		goto out;
1620 	spin_lock(&xprt->reserve_lock);
1621 	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1622 		xprt_add_backlog(xprt, task);
1623 		ret = true;
1624 	}
1625 	spin_unlock(&xprt->reserve_lock);
1626 out:
1627 	return ret;
1628 }
1629 
xprt_dynamic_alloc_slot(struct rpc_xprt * xprt)1630 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1631 {
1632 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1633 
1634 	if (xprt->num_reqs >= xprt->max_reqs)
1635 		goto out;
1636 	++xprt->num_reqs;
1637 	spin_unlock(&xprt->reserve_lock);
1638 	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
1639 	spin_lock(&xprt->reserve_lock);
1640 	if (req != NULL)
1641 		goto out;
1642 	--xprt->num_reqs;
1643 	req = ERR_PTR(-ENOMEM);
1644 out:
1645 	return req;
1646 }
1647 
xprt_dynamic_free_slot(struct rpc_xprt * xprt,struct rpc_rqst * req)1648 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1649 {
1650 	if (xprt->num_reqs > xprt->min_reqs) {
1651 		--xprt->num_reqs;
1652 		kfree(req);
1653 		return true;
1654 	}
1655 	return false;
1656 }
1657 
xprt_alloc_slot(struct rpc_xprt * xprt,struct rpc_task * task)1658 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1659 {
1660 	struct rpc_rqst *req;
1661 
1662 	spin_lock(&xprt->reserve_lock);
1663 	if (!list_empty(&xprt->free)) {
1664 		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1665 		list_del(&req->rq_list);
1666 		goto out_init_req;
1667 	}
1668 	req = xprt_dynamic_alloc_slot(xprt);
1669 	if (!IS_ERR(req))
1670 		goto out_init_req;
1671 	switch (PTR_ERR(req)) {
1672 	case -ENOMEM:
1673 		dprintk("RPC:       dynamic allocation of request slot "
1674 				"failed! Retrying\n");
1675 		task->tk_status = -ENOMEM;
1676 		break;
1677 	case -EAGAIN:
1678 		xprt_add_backlog(xprt, task);
1679 		dprintk("RPC:       waiting for request slot\n");
1680 		fallthrough;
1681 	default:
1682 		task->tk_status = -EAGAIN;
1683 	}
1684 	spin_unlock(&xprt->reserve_lock);
1685 	return;
1686 out_init_req:
1687 	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1688 				     xprt->num_reqs);
1689 	spin_unlock(&xprt->reserve_lock);
1690 
1691 	task->tk_status = 0;
1692 	task->tk_rqstp = req;
1693 }
1694 EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1695 
xprt_free_slot(struct rpc_xprt * xprt,struct rpc_rqst * req)1696 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1697 {
1698 	spin_lock(&xprt->reserve_lock);
1699 	if (!xprt_wake_up_backlog(xprt, req) &&
1700 	    !xprt_dynamic_free_slot(xprt, req)) {
1701 		memset(req, 0, sizeof(*req));	/* mark unused */
1702 		list_add(&req->rq_list, &xprt->free);
1703 	}
1704 	spin_unlock(&xprt->reserve_lock);
1705 }
1706 EXPORT_SYMBOL_GPL(xprt_free_slot);
1707 
xprt_free_all_slots(struct rpc_xprt * xprt)1708 static void xprt_free_all_slots(struct rpc_xprt *xprt)
1709 {
1710 	struct rpc_rqst *req;
1711 	while (!list_empty(&xprt->free)) {
1712 		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1713 		list_del(&req->rq_list);
1714 		kfree(req);
1715 	}
1716 }
1717 
xprt_alloc(struct net * net,size_t size,unsigned int num_prealloc,unsigned int max_alloc)1718 struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1719 		unsigned int num_prealloc,
1720 		unsigned int max_alloc)
1721 {
1722 	struct rpc_xprt *xprt;
1723 	struct rpc_rqst *req;
1724 	int i;
1725 
1726 	xprt = kzalloc(size, GFP_KERNEL);
1727 	if (xprt == NULL)
1728 		goto out;
1729 
1730 	xprt_init(xprt, net);
1731 
1732 	for (i = 0; i < num_prealloc; i++) {
1733 		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1734 		if (!req)
1735 			goto out_free;
1736 		list_add(&req->rq_list, &xprt->free);
1737 	}
1738 	if (max_alloc > num_prealloc)
1739 		xprt->max_reqs = max_alloc;
1740 	else
1741 		xprt->max_reqs = num_prealloc;
1742 	xprt->min_reqs = num_prealloc;
1743 	xprt->num_reqs = num_prealloc;
1744 
1745 	return xprt;
1746 
1747 out_free:
1748 	xprt_free(xprt);
1749 out:
1750 	return NULL;
1751 }
1752 EXPORT_SYMBOL_GPL(xprt_alloc);
1753 
xprt_free(struct rpc_xprt * xprt)1754 void xprt_free(struct rpc_xprt *xprt)
1755 {
1756 	put_net(xprt->xprt_net);
1757 	xprt_free_all_slots(xprt);
1758 	kfree_rcu(xprt, rcu);
1759 }
1760 EXPORT_SYMBOL_GPL(xprt_free);
1761 
1762 static void
xprt_init_connect_cookie(struct rpc_rqst * req,struct rpc_xprt * xprt)1763 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1764 {
1765 	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1766 }
1767 
1768 static __be32
xprt_alloc_xid(struct rpc_xprt * xprt)1769 xprt_alloc_xid(struct rpc_xprt *xprt)
1770 {
1771 	__be32 xid;
1772 
1773 	spin_lock(&xprt->reserve_lock);
1774 	xid = (__force __be32)xprt->xid++;
1775 	spin_unlock(&xprt->reserve_lock);
1776 	return xid;
1777 }
1778 
1779 static void
xprt_init_xid(struct rpc_xprt * xprt)1780 xprt_init_xid(struct rpc_xprt *xprt)
1781 {
1782 	xprt->xid = prandom_u32();
1783 }
1784 
1785 static void
xprt_request_init(struct rpc_task * task)1786 xprt_request_init(struct rpc_task *task)
1787 {
1788 	struct rpc_xprt *xprt = task->tk_xprt;
1789 	struct rpc_rqst	*req = task->tk_rqstp;
1790 
1791 	req->rq_task	= task;
1792 	req->rq_xprt    = xprt;
1793 	req->rq_buffer  = NULL;
1794 	req->rq_xid	= xprt_alloc_xid(xprt);
1795 	xprt_init_connect_cookie(req, xprt);
1796 	req->rq_snd_buf.len = 0;
1797 	req->rq_snd_buf.buflen = 0;
1798 	req->rq_rcv_buf.len = 0;
1799 	req->rq_rcv_buf.buflen = 0;
1800 	req->rq_snd_buf.bvec = NULL;
1801 	req->rq_rcv_buf.bvec = NULL;
1802 	req->rq_release_snd_buf = NULL;
1803 	xprt_init_majortimeo(task, req);
1804 
1805 	trace_xprt_reserve(req);
1806 }
1807 
1808 static void
xprt_do_reserve(struct rpc_xprt * xprt,struct rpc_task * task)1809 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1810 {
1811 	xprt->ops->alloc_slot(xprt, task);
1812 	if (task->tk_rqstp != NULL)
1813 		xprt_request_init(task);
1814 }
1815 
1816 /**
1817  * xprt_reserve - allocate an RPC request slot
1818  * @task: RPC task requesting a slot allocation
1819  *
1820  * If the transport is marked as being congested, or if no more
1821  * slots are available, place the task on the transport's
1822  * backlog queue.
1823  */
xprt_reserve(struct rpc_task * task)1824 void xprt_reserve(struct rpc_task *task)
1825 {
1826 	struct rpc_xprt *xprt = task->tk_xprt;
1827 
1828 	task->tk_status = 0;
1829 	if (task->tk_rqstp != NULL)
1830 		return;
1831 
1832 	task->tk_status = -EAGAIN;
1833 	if (!xprt_throttle_congested(xprt, task))
1834 		xprt_do_reserve(xprt, task);
1835 }
1836 
1837 /**
1838  * xprt_retry_reserve - allocate an RPC request slot
1839  * @task: RPC task requesting a slot allocation
1840  *
1841  * If no more slots are available, place the task on the transport's
1842  * backlog queue.
1843  * Note that the only difference with xprt_reserve is that we now
1844  * ignore the value of the XPRT_CONGESTED flag.
1845  */
xprt_retry_reserve(struct rpc_task * task)1846 void xprt_retry_reserve(struct rpc_task *task)
1847 {
1848 	struct rpc_xprt *xprt = task->tk_xprt;
1849 
1850 	task->tk_status = 0;
1851 	if (task->tk_rqstp != NULL)
1852 		return;
1853 
1854 	task->tk_status = -EAGAIN;
1855 	xprt_do_reserve(xprt, task);
1856 }
1857 
1858 /**
1859  * xprt_release - release an RPC request slot
1860  * @task: task which is finished with the slot
1861  *
1862  */
xprt_release(struct rpc_task * task)1863 void xprt_release(struct rpc_task *task)
1864 {
1865 	struct rpc_xprt	*xprt;
1866 	struct rpc_rqst	*req = task->tk_rqstp;
1867 
1868 	if (req == NULL) {
1869 		if (task->tk_client) {
1870 			xprt = task->tk_xprt;
1871 			xprt_release_write(xprt, task);
1872 		}
1873 		return;
1874 	}
1875 
1876 	xprt = req->rq_xprt;
1877 	xprt_request_dequeue_xprt(task);
1878 	spin_lock(&xprt->transport_lock);
1879 	xprt->ops->release_xprt(xprt, task);
1880 	if (xprt->ops->release_request)
1881 		xprt->ops->release_request(task);
1882 	xprt_schedule_autodisconnect(xprt);
1883 	spin_unlock(&xprt->transport_lock);
1884 	if (req->rq_buffer)
1885 		xprt->ops->buf_free(task);
1886 	xdr_free_bvec(&req->rq_rcv_buf);
1887 	xdr_free_bvec(&req->rq_snd_buf);
1888 	if (req->rq_cred != NULL)
1889 		put_rpccred(req->rq_cred);
1890 	if (req->rq_release_snd_buf)
1891 		req->rq_release_snd_buf(req);
1892 
1893 	task->tk_rqstp = NULL;
1894 	if (likely(!bc_prealloc(req)))
1895 		xprt->ops->free_slot(xprt, req);
1896 	else
1897 		xprt_free_bc_request(req);
1898 }
1899 
1900 #ifdef CONFIG_SUNRPC_BACKCHANNEL
1901 void
xprt_init_bc_request(struct rpc_rqst * req,struct rpc_task * task)1902 xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1903 {
1904 	struct xdr_buf *xbufp = &req->rq_snd_buf;
1905 
1906 	task->tk_rqstp = req;
1907 	req->rq_task = task;
1908 	xprt_init_connect_cookie(req, req->rq_xprt);
1909 	/*
1910 	 * Set up the xdr_buf length.
1911 	 * This also indicates that the buffer is XDR encoded already.
1912 	 */
1913 	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1914 		xbufp->tail[0].iov_len;
1915 }
1916 #endif
1917 
xprt_init(struct rpc_xprt * xprt,struct net * net)1918 static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1919 {
1920 	kref_init(&xprt->kref);
1921 
1922 	spin_lock_init(&xprt->transport_lock);
1923 	spin_lock_init(&xprt->reserve_lock);
1924 	spin_lock_init(&xprt->queue_lock);
1925 
1926 	INIT_LIST_HEAD(&xprt->free);
1927 	xprt->recv_queue = RB_ROOT;
1928 	INIT_LIST_HEAD(&xprt->xmit_queue);
1929 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1930 	spin_lock_init(&xprt->bc_pa_lock);
1931 	INIT_LIST_HEAD(&xprt->bc_pa_list);
1932 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1933 	INIT_LIST_HEAD(&xprt->xprt_switch);
1934 
1935 	xprt->last_used = jiffies;
1936 	xprt->cwnd = RPC_INITCWND;
1937 	xprt->bind_index = 0;
1938 
1939 	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1940 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1941 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1942 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1943 
1944 	xprt_init_xid(xprt);
1945 
1946 	xprt->xprt_net = get_net(net);
1947 }
1948 
1949 /**
1950  * xprt_create_transport - create an RPC transport
1951  * @args: rpc transport creation arguments
1952  *
1953  */
xprt_create_transport(struct xprt_create * args)1954 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1955 {
1956 	struct rpc_xprt	*xprt;
1957 	struct xprt_class *t;
1958 
1959 	spin_lock(&xprt_list_lock);
1960 	list_for_each_entry(t, &xprt_list, list) {
1961 		if (t->ident == args->ident) {
1962 			spin_unlock(&xprt_list_lock);
1963 			goto found;
1964 		}
1965 	}
1966 	spin_unlock(&xprt_list_lock);
1967 	dprintk("RPC: transport (%d) not supported\n", args->ident);
1968 	return ERR_PTR(-EIO);
1969 
1970 found:
1971 	xprt = t->setup(args);
1972 	if (IS_ERR(xprt))
1973 		goto out;
1974 	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1975 		xprt->idle_timeout = 0;
1976 	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1977 	if (xprt_has_timer(xprt))
1978 		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1979 	else
1980 		timer_setup(&xprt->timer, NULL, 0);
1981 
1982 	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1983 		xprt_destroy(xprt);
1984 		return ERR_PTR(-EINVAL);
1985 	}
1986 	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1987 	if (xprt->servername == NULL) {
1988 		xprt_destroy(xprt);
1989 		return ERR_PTR(-ENOMEM);
1990 	}
1991 
1992 	rpc_xprt_debugfs_register(xprt);
1993 
1994 	trace_xprt_create(xprt);
1995 out:
1996 	return xprt;
1997 }
1998 
xprt_destroy_cb(struct work_struct * work)1999 static void xprt_destroy_cb(struct work_struct *work)
2000 {
2001 	struct rpc_xprt *xprt =
2002 		container_of(work, struct rpc_xprt, task_cleanup);
2003 
2004 	trace_xprt_destroy(xprt);
2005 
2006 	rpc_xprt_debugfs_unregister(xprt);
2007 	rpc_destroy_wait_queue(&xprt->binding);
2008 	rpc_destroy_wait_queue(&xprt->pending);
2009 	rpc_destroy_wait_queue(&xprt->sending);
2010 	rpc_destroy_wait_queue(&xprt->backlog);
2011 	kfree(xprt->servername);
2012 	/*
2013 	 * Destroy any existing back channel
2014 	 */
2015 	xprt_destroy_backchannel(xprt, UINT_MAX);
2016 
2017 	/*
2018 	 * Tear down transport state and free the rpc_xprt
2019 	 */
2020 	xprt->ops->destroy(xprt);
2021 }
2022 
2023 /**
2024  * xprt_destroy - destroy an RPC transport, killing off all requests.
2025  * @xprt: transport to destroy
2026  *
2027  */
xprt_destroy(struct rpc_xprt * xprt)2028 static void xprt_destroy(struct rpc_xprt *xprt)
2029 {
2030 	/*
2031 	 * Exclude transport connect/disconnect handlers and autoclose
2032 	 */
2033 	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2034 
2035 	del_timer_sync(&xprt->timer);
2036 
2037 	/*
2038 	 * Destroy sockets etc from the system workqueue so they can
2039 	 * safely flush receive work running on rpciod.
2040 	 */
2041 	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2042 	schedule_work(&xprt->task_cleanup);
2043 }
2044 
xprt_destroy_kref(struct kref * kref)2045 static void xprt_destroy_kref(struct kref *kref)
2046 {
2047 	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
2048 }
2049 
2050 /**
2051  * xprt_get - return a reference to an RPC transport.
2052  * @xprt: pointer to the transport
2053  *
2054  */
xprt_get(struct rpc_xprt * xprt)2055 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2056 {
2057 	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2058 		return xprt;
2059 	return NULL;
2060 }
2061 EXPORT_SYMBOL_GPL(xprt_get);
2062 
2063 /**
2064  * xprt_put - release a reference to an RPC transport.
2065  * @xprt: pointer to the transport
2066  *
2067  */
xprt_put(struct rpc_xprt * xprt)2068 void xprt_put(struct rpc_xprt *xprt)
2069 {
2070 	if (xprt != NULL)
2071 		kref_put(&xprt->kref, xprt_destroy_kref);
2072 }
2073 EXPORT_SYMBOL_GPL(xprt_put);
2074