// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
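
/*
 * A minimal sketch of the lifecycle described above, assuming a caller
 * that drives each step by hand. Real callers go through the rpc_clnt
 * layer, which sequences these helpers from its state machine, so the
 * ordering shown here is illustrative rather than authoritative:
 *
 *	xprt_reserve(task);			// slot, or sleep on backlog
 *	// marshal the RPC message into task->tk_rqstp
 *	xprt_request_enqueue_receive(task);	// watch for the reply XID
 *	xprt_request_enqueue_transmit(task);	// queue for transmission
 *	if (xprt_prepare_transmit(task)) {	// take the transport lock
 *		xprt_transmit(task);		// drain the transmit queue
 *		xprt_end_transmit(task);	// drop the transport lock
 *	}
 *	xprt_request_wait_receive(task);	// sleep until reply/timeout
 */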

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
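
/*
 * A hedged sketch of how a transport module might use the registration
 * API above. The class below is illustrative only: the name, ident
 * value and netid strings are assumptions, and the ->setup callback
 * used to create transport instances is omitted for brevity.
 */
#if 0
static struct xprt_class example_transport = {
	.list	= LIST_HEAD_INIT(example_transport.list),
	.name	= "example",
	.owner	= THIS_MODULE,
	.ident	= 255,			/* must not collide with other idents */
	.netid	= { "example", "" },	/* "" terminates the netid list */
};

static int __init example_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_exit(void)
{
	xprt_unregister_transport(&example_transport);
}
#endif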

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_load_transport - load a transport implementation
 * @netid: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *netid)
{
	const struct xprt_class *t;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	xprt_class_release(t);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
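
/*
 * Illustrative use of xprt_load_transport(), assuming a caller that
 * needs the "tcp" netid available before creating a transport; the
 * request_module() call above expects module aliases of the form
 * "rpc<netid>":
 */
#if 0
	err = xprt_load_transport("tcp");
	if (err)	/* -ENOENT: no module provides this netid */
		goto out_fail;
#endif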

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
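
/*
 * Worked example of the increase above, assuming RPC_CWNDSCALE is 256
 * (one request slot in fixed-point units): with cwnd = 512 (two slots),
 * a good reply adds (256 * 256 + 256) / 512 = 128, i.e. half a slot,
 * approximating the textbook cwnd += 1/cwnd growth. A timeout instead
 * halves cwnd, never dropping below one slot (RPC_CWNDSCALE).
 */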

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
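
/*
 * Worked example with assumed, non-exponential parameters: given
 * to_initval = 5s, to_increment = 5s and to_retries = 2, the major
 * timeout computed by xprt_calc_majortimeo() is 5 + 5 * 2 = 15s.
 * Each minor timeout grows rq_timeout (5s, then 10s) and bumps
 * rq_retries; only once the major timeout expires does this function
 * return -ETIMEDOUT and restart the sequence from to_initval.
 */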

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
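
/*
 * For example (values assumed): with init_to = 3 * HZ and
 * max_reconnect_timeout = 300 * HZ, repeated calls yield reestablish
 * timeouts of 6s, 12s, 24s, ... capped at 300s, while a stale value
 * below 3s is raised back up to init_to.
 */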

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
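
/*
 * A hedged sketch of the receive-side pattern that the lookup/pin
 * helpers above support, loosely modelled on what a transport's
 * data_ready handler does; the reply-copy step is a placeholder for
 * transport-specific code:
 */
#if 0
static void example_data_ready(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req) {
		spin_unlock(&xprt->queue_lock);
		return;		/* unmatched XID: counted in bad_xids */
	}
	xprt_pin_rqst(req);	/* keep req alive while the lock is dropped */
	spin_unlock(&xprt->queue_lock);

	/* ... copy the reply data into req->rq_private_buf ... */

	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->queue_lock);
}
#endif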

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
1095  * @task: RPC task
1096  *
1097  */
1098 void
xprt_request_enqueue_receive(struct rpc_task * task)1099 xprt_request_enqueue_receive(struct rpc_task *task)
1100 {
1101 	struct rpc_rqst *req = task->tk_rqstp;
1102 	struct rpc_xprt *xprt = req->rq_xprt;
1103 
1104 	if (!xprt_request_need_enqueue_receive(task, req))
1105 		return;
1106 
1107 	xprt_request_prepare(task->tk_rqstp);
1108 	spin_lock(&xprt->queue_lock);
1109 
1110 	/* Update the softirq receive buffer */
1111 	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1112 			sizeof(req->rq_private_buf));
1113 
1114 	/* Add request to the receive list */
1115 	xprt_request_rb_insert(xprt, req);
1116 	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1117 	spin_unlock(&xprt->queue_lock);
1118 
1119 	/* Turn off autodisconnect */
1120 	del_singleshot_timer_sync(&xprt->timer);
1121 }
1122 
1123 /**
1124  * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1125  * @task: RPC task
1126  *
1127  * Caller must hold xprt->queue_lock.
1128  */
1129 static void
xprt_request_dequeue_receive_locked(struct rpc_task * task)1130 xprt_request_dequeue_receive_locked(struct rpc_task *task)
1131 {
1132 	struct rpc_rqst *req = task->tk_rqstp;
1133 
1134 	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1135 		xprt_request_rb_remove(req->rq_xprt, req);
1136 }
1137 
1138 /**
1139  * xprt_update_rtt - Update RPC RTT statistics
1140  * @task: RPC request that recently completed
1141  *
1142  * Caller holds xprt->queue_lock.
1143  */
xprt_update_rtt(struct rpc_task * task)1144 void xprt_update_rtt(struct rpc_task *task)
1145 {
1146 	struct rpc_rqst *req = task->tk_rqstp;
1147 	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1148 	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1149 	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1150 
1151 	if (timer) {
1152 		if (req->rq_ntrans == 1)
1153 			rpc_update_rtt(rtt, timer, m);
1154 		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1155 	}
1156 }
1157 EXPORT_SYMBOL_GPL(xprt_update_rtt);
1158 
1159 /**
1160  * xprt_complete_rqst - called when reply processing is complete
1161  * @task: RPC request that recently completed
1162  * @copied: actual number of bytes received from the transport
1163  *
1164  * Caller holds xprt->queue_lock.
1165  */
xprt_complete_rqst(struct rpc_task * task,int copied)1166 void xprt_complete_rqst(struct rpc_task *task, int copied)
1167 {
1168 	struct rpc_rqst *req = task->tk_rqstp;
1169 	struct rpc_xprt *xprt = req->rq_xprt;
1170 
1171 	xprt->stat.recvs++;
1172 
1173 	req->rq_private_buf.len = copied;
1174 	/* Ensure all writes are done before we update */
1175 	/* req->rq_reply_bytes_recvd */
1176 	smp_wmb();
1177 	req->rq_reply_bytes_recvd = copied;
1178 	xprt_request_dequeue_receive_locked(task);
1179 	rpc_wake_up_queued_task(&xprt->pending, task);
1180 }
1181 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
1182 
xprt_timer(struct rpc_task * task)1183 static void xprt_timer(struct rpc_task *task)
1184 {
1185 	struct rpc_rqst *req = task->tk_rqstp;
1186 	struct rpc_xprt *xprt = req->rq_xprt;
1187 
1188 	if (task->tk_status != -ETIMEDOUT)
1189 		return;
1190 
1191 	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1192 	if (!req->rq_reply_bytes_recvd) {
1193 		if (xprt->ops->timer)
1194 			xprt->ops->timer(xprt, task);
1195 	} else
1196 		task->tk_status = 0;
1197 }
1198 
1199 /**
1200  * xprt_wait_for_reply_request_def - wait for reply
1201  * @task: pointer to rpc_task
1202  *
1203  * Set a request's retransmit timeout based on the transport's
1204  * default timeout parameters.  Used by transports that don't adjust
1205  * the retransmit timeout based on round-trip time estimation,
1206  * and put the task to sleep on the pending queue.
1207  */
xprt_wait_for_reply_request_def(struct rpc_task * task)1208 void xprt_wait_for_reply_request_def(struct rpc_task *task)
1209 {
1210 	struct rpc_rqst *req = task->tk_rqstp;
1211 
1212 	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1213 			xprt_request_timeout(req));
1214 }
1215 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1216 
1217 /**
1218  * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1219  * @task: pointer to rpc_task
1220  *
1221  * Set a request's retransmit timeout using the RTT estimator,
1222  * and put the task to sleep on the pending queue.
1223  */
xprt_wait_for_reply_request_rtt(struct rpc_task * task)1224 void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1225 {
1226 	int timer = task->tk_msg.rpc_proc->p_timer;
1227 	struct rpc_clnt *clnt = task->tk_client;
1228 	struct rpc_rtt *rtt = clnt->cl_rtt;
1229 	struct rpc_rqst *req = task->tk_rqstp;
1230 	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1231 	unsigned long timeout;
1232 
1233 	timeout = rpc_calc_rto(rtt, timer);
1234 	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1235 	if (timeout > max_timeout || timeout == 0)
1236 		timeout = max_timeout;
1237 	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1238 			jiffies + timeout);
1239 }
1240 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1241 
1242 /**
1243  * xprt_request_wait_receive - wait for the reply to an RPC request
1244  * @task: RPC task about to send a request
1245  *
1246  */
xprt_request_wait_receive(struct rpc_task * task)1247 void xprt_request_wait_receive(struct rpc_task *task)
1248 {
1249 	struct rpc_rqst *req = task->tk_rqstp;
1250 	struct rpc_xprt *xprt = req->rq_xprt;
1251 
1252 	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1253 		return;
1254 	/*
1255 	 * Sleep on the pending queue if we're expecting a reply.
1256 	 * The spinlock ensures atomicity between the test of
1257 	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1258 	 */
1259 	spin_lock(&xprt->queue_lock);
1260 	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1261 		xprt->ops->wait_for_reply_request(task);
1262 		/*
1263 		 * Send an extra queue wakeup call if the
1264 		 * connection was dropped in case the call to
1265 		 * rpc_sleep_on() raced.
1266 		 */
1267 		if (xprt_request_retransmit_after_disconnect(task))
1268 			rpc_wake_up_queued_task_set_status(&xprt->pending,
1269 					task, -ENOTCONN);
1270 	}
1271 	spin_unlock(&xprt->queue_lock);
1272 }
1273 
1274 static bool
xprt_request_need_enqueue_transmit(struct rpc_task * task,struct rpc_rqst * req)1275 xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1276 {
1277 	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1278 }
1279 
1280 /**
1281  * xprt_request_enqueue_transmit - queue a task for transmission
1282  * @task: pointer to rpc_task
1283  *
1284  * Add a task to the transmission queue.
1285  */
1286 void
xprt_request_enqueue_transmit(struct rpc_task * task)1287 xprt_request_enqueue_transmit(struct rpc_task *task)
1288 {
1289 	struct rpc_rqst *pos, *req = task->tk_rqstp;
1290 	struct rpc_xprt *xprt = req->rq_xprt;
1291 
1292 	if (xprt_request_need_enqueue_transmit(task, req)) {
1293 		req->rq_bytes_sent = 0;
1294 		spin_lock(&xprt->queue_lock);
1295 		/*
1296 		 * Requests that carry congestion control credits are added
1297 		 * to the head of the list to avoid starvation issues.
1298 		 */
1299 		if (req->rq_cong) {
1300 			xprt_clear_congestion_window_wait(xprt);
1301 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1302 				if (pos->rq_cong)
1303 					continue;
1304 				/* Note: req is added _before_ pos */
1305 				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1306 				INIT_LIST_HEAD(&req->rq_xmit2);
1307 				goto out;
1308 			}
1309 		} else if (!req->rq_seqno) {
1310 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1311 				if (pos->rq_task->tk_owner != task->tk_owner)
1312 					continue;
1313 				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1314 				INIT_LIST_HEAD(&req->rq_xmit);
1315 				goto out;
1316 			}
1317 		}
1318 		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1319 		INIT_LIST_HEAD(&req->rq_xmit2);
1320 out:
1321 		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1322 		spin_unlock(&xprt->queue_lock);
1323 	}
1324 }
1325 
1326 /**
1327  * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1328  * @task: pointer to rpc_task
1329  *
1330  * Remove a task from the transmission queue
1331  * Caller must hold xprt->queue_lock
1332  */
1333 static void
xprt_request_dequeue_transmit_locked(struct rpc_task * task)1334 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1335 {
1336 	struct rpc_rqst *req = task->tk_rqstp;
1337 
1338 	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1339 		return;
1340 	if (!list_empty(&req->rq_xmit)) {
1341 		list_del(&req->rq_xmit);
1342 		if (!list_empty(&req->rq_xmit2)) {
1343 			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1344 					struct rpc_rqst, rq_xmit2);
1345 			list_del(&req->rq_xmit2);
1346 			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1347 		}
1348 	} else
1349 		list_del(&req->rq_xmit2);
1350 }
1351 
1352 /**
1353  * xprt_request_dequeue_transmit - remove a task from the transmission queue
1354  * @task: pointer to rpc_task
1355  *
1356  * Remove a task from the transmission queue
1357  */
1358 static void
xprt_request_dequeue_transmit(struct rpc_task * task)1359 xprt_request_dequeue_transmit(struct rpc_task *task)
1360 {
1361 	struct rpc_rqst *req = task->tk_rqstp;
1362 	struct rpc_xprt *xprt = req->rq_xprt;
1363 
1364 	spin_lock(&xprt->queue_lock);
1365 	xprt_request_dequeue_transmit_locked(task);
1366 	spin_unlock(&xprt->queue_lock);
1367 }
1368 
1369 /**
1370  * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1371  * @task: pointer to rpc_task
1372  *
1373  * Remove a task from the transmit and receive queues, and ensure that
1374  * it is not pinned by the receive work item.
1375  */
1376 void
xprt_request_dequeue_xprt(struct rpc_task * task)1377 xprt_request_dequeue_xprt(struct rpc_task *task)
1378 {
1379 	struct rpc_rqst	*req = task->tk_rqstp;
1380 	struct rpc_xprt *xprt = req->rq_xprt;
1381 
1382 	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1383 	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1384 	    xprt_is_pinned_rqst(req)) {
1385 		spin_lock(&xprt->queue_lock);
1386 		xprt_request_dequeue_transmit_locked(task);
1387 		xprt_request_dequeue_receive_locked(task);
1388 		while (xprt_is_pinned_rqst(req)) {
1389 			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1390 			spin_unlock(&xprt->queue_lock);
1391 			xprt_wait_on_pinned_rqst(req);
1392 			spin_lock(&xprt->queue_lock);
1393 			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1394 		}
1395 		spin_unlock(&xprt->queue_lock);
1396 	}
1397 }
1398 
1399 /**
1400  * xprt_request_prepare - prepare an encoded request for transport
1401  * @req: pointer to rpc_rqst
1402  *
1403  * Calls into the transport layer to do whatever is needed to prepare
1404  * the request for transmission or receive.
1405  */
1406 void
xprt_request_prepare(struct rpc_rqst * req)1407 xprt_request_prepare(struct rpc_rqst *req)
1408 {
1409 	struct rpc_xprt *xprt = req->rq_xprt;
1410 
1411 	if (xprt->ops->prepare_request)
1412 		xprt->ops->prepare_request(req);
1413 }
1414 
1415 /**
1416  * xprt_request_need_retransmit - Test if a task needs retransmission
1417  * @task: pointer to rpc_task
1418  *
1419  * Test for whether a connection breakage requires the task to retransmit
1420  */
1421 bool
xprt_request_need_retransmit(struct rpc_task * task)1422 xprt_request_need_retransmit(struct rpc_task *task)
1423 {
1424 	return xprt_request_retransmit_after_disconnect(task);
1425 }
1426 
1427 /**
1428  * xprt_prepare_transmit - reserve the transport before sending a request
1429  * @task: RPC task about to send a request
1430  *
1431  */
xprt_prepare_transmit(struct rpc_task * task)1432 bool xprt_prepare_transmit(struct rpc_task *task)
1433 {
1434 	struct rpc_rqst	*req = task->tk_rqstp;
1435 	struct rpc_xprt	*xprt = req->rq_xprt;
1436 
1437 	if (!xprt_lock_write(xprt, task)) {
1438 		/* Race breaker: someone may have transmitted us */
1439 		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1440 			rpc_wake_up_queued_task_set_status(&xprt->sending,
1441 					task, 0);
1442 		return false;
1443 
1444 	}
1445 	return true;
1446 }
1447 
xprt_end_transmit(struct rpc_task * task)1448 void xprt_end_transmit(struct rpc_task *task)
1449 {
1450 	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
1451 
1452 	xprt_inject_disconnect(xprt);
1453 	xprt_release_write(xprt, task);
1454 }
1455 
1456 /**
1457  * xprt_request_transmit - send an RPC request on a transport
1458  * @req: pointer to request to transmit
1459  * @snd_task: RPC task that owns the transport lock
1460  *
1461  * This performs the transmission of a single request.
1462  * Note that if the request is not the same as snd_task, then it
1463  * does need to be pinned.
1464  * Returns '0' on success.
1465  */
1466 static int
xprt_request_transmit(struct rpc_rqst * req,struct rpc_task * snd_task)1467 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1468 {
1469 	struct rpc_xprt *xprt = req->rq_xprt;
1470 	struct rpc_task *task = req->rq_task;
1471 	unsigned int connect_cookie;
1472 	int is_retrans = RPC_WAS_SENT(task);
1473 	int status;
1474 
1475 	if (!req->rq_bytes_sent) {
1476 		if (xprt_request_data_received(task)) {
1477 			status = 0;
1478 			goto out_dequeue;
1479 		}
1480 		/* Verify that our message lies in the RPCSEC_GSS window */
1481 		if (rpcauth_xmit_need_reencode(task)) {
1482 			status = -EBADMSG;
1483 			goto out_dequeue;
1484 		}
1485 		if (RPC_SIGNALLED(task)) {
1486 			status = -ERESTARTSYS;
1487 			goto out_dequeue;
1488 		}
1489 	}
1490 
1491 	/*
1492 	 * Update req->rq_ntrans before transmitting to avoid races with
1493 	 * xprt_update_rtt(), which needs to know that it is recording a
1494 	 * reply to the first transmission.
1495 	 */
1496 	req->rq_ntrans++;
1497 
1498 	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1499 	connect_cookie = xprt->connect_cookie;
1500 	status = xprt->ops->send_request(req);
1501 	if (status != 0) {
1502 		req->rq_ntrans--;
1503 		trace_xprt_transmit(req, status);
1504 		return status;
1505 	}
1506 
1507 	if (is_retrans)
1508 		task->tk_client->cl_stats->rpcretrans++;
1509 
1510 	xprt_inject_disconnect(xprt);
1511 
1512 	task->tk_flags |= RPC_TASK_SENT;
1513 	spin_lock(&xprt->transport_lock);
1514 
1515 	xprt->stat.sends++;
1516 	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1517 	xprt->stat.bklog_u += xprt->backlog.qlen;
1518 	xprt->stat.sending_u += xprt->sending.qlen;
1519 	xprt->stat.pending_u += xprt->pending.qlen;
1520 	spin_unlock(&xprt->transport_lock);
1521 
1522 	req->rq_connect_cookie = connect_cookie;
1523 out_dequeue:
1524 	trace_xprt_transmit(req, status);
1525 	xprt_request_dequeue_transmit(task);
1526 	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1527 	return status;
1528 }
1529 
1530 /**
1531  * xprt_transmit - send an RPC request on a transport
1532  * @task: controlling RPC task
1533  *
1534  * Attempts to drain the transmit queue. On exit, either the transport
1535  * signalled an error that needs to be handled before transmission can
1536  * resume, or @task finished transmitting, and detected that it already
1537  * received a reply.
1538  */
1539 void
xprt_transmit(struct rpc_task * task)1540 xprt_transmit(struct rpc_task *task)
1541 {
1542 	struct rpc_rqst *next, *req = task->tk_rqstp;
1543 	struct rpc_xprt	*xprt = req->rq_xprt;
1544 	int status;
1545 
1546 	spin_lock(&xprt->queue_lock);
1547 	for (;;) {
1548 		next = list_first_entry_or_null(&xprt->xmit_queue,
1549 						struct rpc_rqst, rq_xmit);
1550 		if (!next)
1551 			break;
1552 		xprt_pin_rqst(next);
1553 		spin_unlock(&xprt->queue_lock);
1554 		status = xprt_request_transmit(next, task);
1555 		if (status == -EBADMSG && next != req)
1556 			status = 0;
1557 		spin_lock(&xprt->queue_lock);
1558 		xprt_unpin_rqst(next);
1559 		if (status < 0) {
1560 			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1561 				task->tk_status = status;
1562 			break;
1563 		}
1564 		/* Was @task transmitted, and has it received a reply? */
1565 		if (xprt_request_data_received(task) &&
1566 		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1567 			break;
1568 		cond_resched_lock(&xprt->queue_lock);
1569 	}
1570 	spin_unlock(&xprt->queue_lock);
1571 }
1572 
xprt_complete_request_init(struct rpc_task * task)1573 static void xprt_complete_request_init(struct rpc_task *task)
1574 {
1575 	if (task->tk_rqstp)
1576 		xprt_request_init(task);
1577 }
1578 
xprt_add_backlog(struct rpc_xprt * xprt,struct rpc_task * task)1579 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1580 {
1581 	set_bit(XPRT_CONGESTED, &xprt->state);
1582 	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1583 }
1584 EXPORT_SYMBOL_GPL(xprt_add_backlog);
1585 
__xprt_set_rq(struct rpc_task * task,void * data)1586 static bool __xprt_set_rq(struct rpc_task *task, void *data)
1587 {
1588 	struct rpc_rqst *req = data;
1589 
1590 	if (task->tk_rqstp == NULL) {
1591 		memset(req, 0, sizeof(*req));	/* mark unused */
1592 		task->tk_rqstp = req;
1593 		return true;
1594 	}
1595 	return false;
1596 }
1597 
xprt_wake_up_backlog(struct rpc_xprt * xprt,struct rpc_rqst * req)1598 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1599 {
1600 	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1601 		clear_bit(XPRT_CONGESTED, &xprt->state);
1602 		return false;
1603 	}
1604 	return true;
1605 }
1606 EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
1607 
xprt_throttle_congested(struct rpc_xprt * xprt,struct rpc_task * task)1608 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1609 {
1610 	bool ret = false;
1611 
1612 	if (!test_bit(XPRT_CONGESTED, &xprt->state))
1613 		goto out;
1614 	spin_lock(&xprt->reserve_lock);
1615 	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1616 		xprt_add_backlog(xprt, task);
1617 		ret = true;
1618 	}
1619 	spin_unlock(&xprt->reserve_lock);
1620 out:
1621 	return ret;
1622 }
1623 
xprt_dynamic_alloc_slot(struct rpc_xprt * xprt)1624 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1625 {
1626 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1627 	gfp_t gfp_mask = GFP_KERNEL;
1628 
1629 	if (xprt->num_reqs >= xprt->max_reqs)
1630 		goto out;
1631 	++xprt->num_reqs;
1632 	spin_unlock(&xprt->reserve_lock);
1633 	if (current->flags & PF_WQ_WORKER)
1634 		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
1635 	req = kzalloc(sizeof(*req), gfp_mask);
1636 	spin_lock(&xprt->reserve_lock);
1637 	if (req != NULL)
1638 		goto out;
1639 	--xprt->num_reqs;
1640 	req = ERR_PTR(-ENOMEM);
1641 out:
1642 	return req;
1643 }
1644 
xprt_dynamic_free_slot(struct rpc_xprt * xprt,struct rpc_rqst * req)1645 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1646 {
1647 	if (xprt->num_reqs > xprt->min_reqs) {
1648 		--xprt->num_reqs;
1649 		kfree(req);
1650 		return true;
1651 	}
1652 	return false;
1653 }
1654 
xprt_alloc_slot(struct rpc_xprt * xprt,struct rpc_task * task)1655 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1656 {
1657 	struct rpc_rqst *req;
1658 
1659 	spin_lock(&xprt->reserve_lock);
1660 	if (!list_empty(&xprt->free)) {
1661 		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1662 		list_del(&req->rq_list);
1663 		goto out_init_req;
1664 	}
1665 	req = xprt_dynamic_alloc_slot(xprt);
1666 	if (!IS_ERR(req))
1667 		goto out_init_req;
1668 	switch (PTR_ERR(req)) {
1669 	case -ENOMEM:
1670 		dprintk("RPC:       dynamic allocation of request slot "
1671 				"failed! Retrying\n");
1672 		task->tk_status = -ENOMEM;
1673 		break;
1674 	case -EAGAIN:
1675 		xprt_add_backlog(xprt, task);
1676 		dprintk("RPC:       waiting for request slot\n");
1677 		fallthrough;
1678 	default:
1679 		task->tk_status = -EAGAIN;
1680 	}
1681 	spin_unlock(&xprt->reserve_lock);
1682 	return;
1683 out_init_req:
1684 	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1685 				     xprt->num_reqs);
1686 	spin_unlock(&xprt->reserve_lock);
1687 
1688 	task->tk_status = 0;
1689 	task->tk_rqstp = req;
1690 }
1691 EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1692 
xprt_free_slot(struct rpc_xprt * xprt,struct rpc_rqst * req)1693 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1694 {
1695 	spin_lock(&xprt->reserve_lock);
1696 	if (!xprt_wake_up_backlog(xprt, req) &&
1697 	    !xprt_dynamic_free_slot(xprt, req)) {
1698 		memset(req, 0, sizeof(*req));	/* mark unused */
1699 		list_add(&req->rq_list, &xprt->free);
1700 	}
1701 	spin_unlock(&xprt->reserve_lock);
1702 }
1703 EXPORT_SYMBOL_GPL(xprt_free_slot);
1704 
1705 static void xprt_free_all_slots(struct rpc_xprt *xprt)
1706 {
1707 	struct rpc_rqst *req;
1708 	while (!list_empty(&xprt->free)) {
1709 		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1710 		list_del(&req->rq_list);
1711 		kfree(req);
1712 	}
1713 }
1714 
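/*
 * Allocate and initialize a transport with @num_prealloc request slots
 * on the free list.  The slot table may grow dynamically up to
 * @max_alloc entries; if @max_alloc is not larger than @num_prealloc,
 * the table stays at its preallocated size.
 */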
1715 struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1716 		unsigned int num_prealloc,
1717 		unsigned int max_alloc)
1718 {
1719 	struct rpc_xprt *xprt;
1720 	struct rpc_rqst *req;
1721 	int i;
1722 
1723 	xprt = kzalloc(size, GFP_KERNEL);
1724 	if (xprt == NULL)
1725 		goto out;
1726 
1727 	xprt_init(xprt, net);
1728 
1729 	for (i = 0; i < num_prealloc; i++) {
1730 		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1731 		if (!req)
1732 			goto out_free;
1733 		list_add(&req->rq_list, &xprt->free);
1734 	}
1735 	if (max_alloc > num_prealloc)
1736 		xprt->max_reqs = max_alloc;
1737 	else
1738 		xprt->max_reqs = num_prealloc;
1739 	xprt->min_reqs = num_prealloc;
1740 	xprt->num_reqs = num_prealloc;
1741 
1742 	return xprt;
1743 
1744 out_free:
1745 	xprt_free(xprt);
1746 out:
1747 	return NULL;
1748 }
1749 EXPORT_SYMBOL_GPL(xprt_alloc);
1750 
1751 void xprt_free(struct rpc_xprt *xprt)
1752 {
1753 	put_net(xprt->xprt_net);
1754 	xprt_free_all_slots(xprt);
1755 	kfree_rcu(xprt, rcu);
1756 }
1757 EXPORT_SYMBOL_GPL(xprt_free);
1758 
1759 static void
1760 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1761 {
1762 	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1763 }
1764 
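/*
 * XIDs are handed out sequentially under ->reserve_lock, starting from
 * the random value chosen by xprt_init_xid() below, so that replies can
 * be matched to requests while stale traffic from an earlier connection
 * is unlikely to carry a matching XID.
 */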
1765 static __be32
1766 xprt_alloc_xid(struct rpc_xprt *xprt)
1767 {
1768 	__be32 xid;
1769 
1770 	spin_lock(&xprt->reserve_lock);
1771 	xid = (__force __be32)xprt->xid++;
1772 	spin_unlock(&xprt->reserve_lock);
1773 	return xid;
1774 }
1775 
1776 static void
1777 xprt_init_xid(struct rpc_xprt *xprt)
1778 {
1779 	xprt->xid = prandom_u32();
1780 }
1781 
1782 static void
1783 xprt_request_init(struct rpc_task *task)
1784 {
1785 	struct rpc_xprt *xprt = task->tk_xprt;
1786 	struct rpc_rqst	*req = task->tk_rqstp;
1787 
1788 	req->rq_task	= task;
1789 	req->rq_xprt    = xprt;
1790 	req->rq_buffer  = NULL;
1791 	req->rq_xid	= xprt_alloc_xid(xprt);
1792 	xprt_init_connect_cookie(req, xprt);
1793 	req->rq_snd_buf.len = 0;
1794 	req->rq_snd_buf.buflen = 0;
1795 	req->rq_rcv_buf.len = 0;
1796 	req->rq_rcv_buf.buflen = 0;
1797 	req->rq_snd_buf.bvec = NULL;
1798 	req->rq_rcv_buf.bvec = NULL;
1799 	req->rq_release_snd_buf = NULL;
1800 	xprt_init_majortimeo(task, req);
1801 
1802 	trace_xprt_reserve(req);
1803 }
1804 
1805 static void
1806 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1807 {
1808 	xprt->ops->alloc_slot(xprt, task);
1809 	if (task->tk_rqstp != NULL)
1810 		xprt_request_init(task);
1811 }
1812 
1813 /**
1814  * xprt_reserve - allocate an RPC request slot
1815  * @task: RPC task requesting a slot allocation
1816  *
1817  * If the transport is marked as being congested, or if no more
1818  * slots are available, place the task on the transport's
1819  * backlog queue.
1820  */
1821 void xprt_reserve(struct rpc_task *task)
1822 {
1823 	struct rpc_xprt *xprt = task->tk_xprt;
1824 
1825 	task->tk_status = 0;
1826 	if (task->tk_rqstp != NULL)
1827 		return;
1828 
1829 	task->tk_status = -EAGAIN;
1830 	if (!xprt_throttle_congested(xprt, task))
1831 		xprt_do_reserve(xprt, task);
1832 }
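
/*
 * A minimal caller sketch, purely illustrative: in the kernel this is
 * driven by the RPC client state machine in net/sunrpc/clnt.c, and
 * example_reserve() below is a hypothetical helper, not kernel code.
 */
static void example_reserve(struct rpc_task *task)
{
	xprt_reserve(task);
	if (task->tk_status == -EAGAIN)
		return;		/* backlogged; the task is retried when woken */
	/* On success, task->tk_rqstp points to an initialized rpc_rqst. */
}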
1833 
1834 /**
1835  * xprt_retry_reserve - allocate an RPC request slot
1836  * @task: RPC task requesting a slot allocation
1837  *
1838  * If no more slots are available, place the task on the transport's
1839  * backlog queue.
1840  * Note that the only difference from xprt_reserve is that this
1841  * function ignores the value of the XPRT_CONGESTED flag.
1842  */
1843 void xprt_retry_reserve(struct rpc_task *task)
1844 {
1845 	struct rpc_xprt *xprt = task->tk_xprt;
1846 
1847 	task->tk_status = 0;
1848 	if (task->tk_rqstp != NULL)
1849 		return;
1850 
1851 	task->tk_status = -EAGAIN;
1852 	xprt_do_reserve(xprt, task);
1853 }
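
/*
 * Hypothetical retry sketch: a task that must make progress even while
 * the transport is marked congested (for instance when re-reserving
 * after a transmission failure) uses the variant that skips the
 * XPRT_CONGESTED check.  example_retry() is illustrative, not kernel
 * code.
 */
static void example_retry(struct rpc_task *task)
{
	xprt_retry_reserve(task);	/* still -EAGAIN if no slot is free */
}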
1854 
1855 /**
1856  * xprt_release - release an RPC request slot
1857  * @task: task which is finished with the slot
1858  *
1859  */
1860 void xprt_release(struct rpc_task *task)
1861 {
1862 	struct rpc_xprt	*xprt;
1863 	struct rpc_rqst	*req = task->tk_rqstp;
1864 
1865 	if (req == NULL) {
1866 		if (task->tk_client) {
1867 			xprt = task->tk_xprt;
1868 			xprt_release_write(xprt, task);
1869 		}
1870 		return;
1871 	}
1872 
1873 	xprt = req->rq_xprt;
1874 	xprt_request_dequeue_xprt(task);
1875 	spin_lock(&xprt->transport_lock);
1876 	xprt->ops->release_xprt(xprt, task);
1877 	if (xprt->ops->release_request)
1878 		xprt->ops->release_request(task);
1879 	xprt_schedule_autodisconnect(xprt);
1880 	spin_unlock(&xprt->transport_lock);
1881 	if (req->rq_buffer)
1882 		xprt->ops->buf_free(task);
1883 	xdr_free_bvec(&req->rq_rcv_buf);
1884 	xdr_free_bvec(&req->rq_snd_buf);
1885 	if (req->rq_cred != NULL)
1886 		put_rpccred(req->rq_cred);
1887 	if (req->rq_release_snd_buf)
1888 		req->rq_release_snd_buf(req);
1889 
1890 	task->tk_rqstp = NULL;
1891 	if (likely(!bc_prealloc(req)))
1892 		xprt->ops->free_slot(xprt, req);
1893 	else
1894 		xprt_free_bc_request(req);
1895 }
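
/*
 * Slot-lifecycle sketch from a caller's perspective (hypothetical
 * helper; in practice rpc_release_task() balances the reservation):
 * every successful reservation must be paired with xprt_release(),
 * which returns the slot and wakes the next backlogged task.
 */
static void example_slot_cycle(struct rpc_task *task)
{
	xprt_reserve(task);
	if (task->tk_rqstp == NULL)
		return;			/* no slot; task went to the backlog */
	/* ... encode, transmit and await the reply here ... */
	xprt_release(task);		/* slot freed or recycled */
}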
1896 
1897 #ifdef CONFIG_SUNRPC_BACKCHANNEL
1898 void
1899 xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1900 {
1901 	struct xdr_buf *xbufp = &req->rq_snd_buf;
1902 
1903 	task->tk_rqstp = req;
1904 	req->rq_task = task;
1905 	xprt_init_connect_cookie(req, req->rq_xprt);
1906 	/*
1907 	 * Set up the xdr_buf length.
1908 	 * This also indicates that the buffer is XDR encoded already.
1909 	 */
1910 	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1911 		xbufp->tail[0].iov_len;
1912 }
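
/*
 * Worked example of the length computation above (illustrative
 * numbers): a backchannel request with a 24-byte head, 4096 bytes of
 * page data and a 4-byte tail yields
 * xbufp->len = 24 + 4096 + 4 = 4124 bytes of already-encoded payload.
 */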
1913 #endif
1914 
1915 static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1916 {
1917 	kref_init(&xprt->kref);
1918 
1919 	spin_lock_init(&xprt->transport_lock);
1920 	spin_lock_init(&xprt->reserve_lock);
1921 	spin_lock_init(&xprt->queue_lock);
1922 
1923 	INIT_LIST_HEAD(&xprt->free);
1924 	xprt->recv_queue = RB_ROOT;
1925 	INIT_LIST_HEAD(&xprt->xmit_queue);
1926 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1927 	spin_lock_init(&xprt->bc_pa_lock);
1928 	INIT_LIST_HEAD(&xprt->bc_pa_list);
1929 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1930 	INIT_LIST_HEAD(&xprt->xprt_switch);
1931 
1932 	xprt->last_used = jiffies;
1933 	xprt->cwnd = RPC_INITCWND;
1934 	xprt->bind_index = 0;
1935 
1936 	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1937 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1938 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1939 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1940 
1941 	xprt_init_xid(xprt);
1942 
1943 	xprt->xprt_net = get_net(net);
1944 }
1945 
1946 /**
1947  * xprt_create_transport - create an RPC transport
1948  * @args: rpc transport creation arguments
1949  *
1950  */
1951 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1952 {
1953 	struct rpc_xprt	*xprt;
1954 	struct xprt_class *t;
1955 
1956 	spin_lock(&xprt_list_lock);
1957 	list_for_each_entry(t, &xprt_list, list) {
1958 		if (t->ident == args->ident) {
1959 			spin_unlock(&xprt_list_lock);
1960 			goto found;
1961 		}
1962 	}
1963 	spin_unlock(&xprt_list_lock);
1964 	dprintk("RPC: transport (%d) not supported\n", args->ident);
1965 	return ERR_PTR(-EIO);
1966 
1967 found:
1968 	xprt = t->setup(args);
1969 	if (IS_ERR(xprt))
1970 		goto out;
1971 	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1972 		xprt->idle_timeout = 0;
1973 	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1974 	if (xprt_has_timer(xprt))
1975 		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1976 	else
1977 		timer_setup(&xprt->timer, NULL, 0);
1978 
1979 	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1980 		xprt_destroy(xprt);
1981 		return ERR_PTR(-EINVAL);
1982 	}
1983 	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1984 	if (xprt->servername == NULL) {
1985 		xprt_destroy(xprt);
1986 		return ERR_PTR(-ENOMEM);
1987 	}
1988 
1989 	rpc_xprt_debugfs_register(xprt);
1990 
1991 	trace_xprt_create(xprt);
1992 out:
1993 	return xprt;
1994 }
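
/*
 * Creation sketch with hypothetical values; real callers reach this
 * through rpc_create(), which fills in struct xprt_create from the
 * rpc_create_args.  The server name and address below are placeholders.
 */
static struct rpc_xprt *example_create(struct net *net,
		struct sockaddr *addr, size_t addrlen)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= addr,
		.addrlen	= addrlen,
		.servername	= "example-server",
	};

	return xprt_create_transport(&args);	/* ERR_PTR() on failure */
}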
1995 
1996 static void xprt_destroy_cb(struct work_struct *work)
1997 {
1998 	struct rpc_xprt *xprt =
1999 		container_of(work, struct rpc_xprt, task_cleanup);
2000 
2001 	trace_xprt_destroy(xprt);
2002 
2003 	rpc_xprt_debugfs_unregister(xprt);
2004 	rpc_destroy_wait_queue(&xprt->binding);
2005 	rpc_destroy_wait_queue(&xprt->pending);
2006 	rpc_destroy_wait_queue(&xprt->sending);
2007 	rpc_destroy_wait_queue(&xprt->backlog);
2008 	kfree(xprt->servername);
2009 	/*
2010 	 * Destroy any existing back channel
2011 	 */
2012 	xprt_destroy_backchannel(xprt, UINT_MAX);
2013 
2014 	/*
2015 	 * Tear down transport state and free the rpc_xprt
2016 	 */
2017 	xprt->ops->destroy(xprt);
2018 }
2019 
2020 /**
2021  * xprt_destroy - destroy an RPC transport, killing off all requests.
2022  * @xprt: transport to destroy
2023  *
2024  */
2025 static void xprt_destroy(struct rpc_xprt *xprt)
2026 {
2027 	/*
2028 	 * Exclude transport connect/disconnect handlers and autoclose
2029 	 */
2030 	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2031 
2032 	/*
2033 	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
2034 	 * is cleared.  We use ->transport_lock to ensure the mod_timer()
2035  * can only run *before* del_timer_sync(), never after.
2036 	 */
2037 	spin_lock(&xprt->transport_lock);
2038 	del_timer_sync(&xprt->timer);
2039 	spin_unlock(&xprt->transport_lock);
2040 
2041 	/*
2042  * Destroy sockets etc. from the system workqueue so they can
2043 	 * safely flush receive work running on rpciod.
2044 	 */
2045 	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2046 	schedule_work(&xprt->task_cleanup);
2047 }
2048 
2049 static void xprt_destroy_kref(struct kref *kref)
2050 {
2051 	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
2052 }
2053 
2054 /**
2055  * xprt_get - return a reference to an RPC transport.
2056  * @xprt: pointer to the transport
2057  *
2058  */
2059 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2060 {
2061 	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2062 		return xprt;
2063 	return NULL;
2064 }
2065 EXPORT_SYMBOL_GPL(xprt_get);
2066 
2067 /**
2068  * xprt_put - release a reference to an RPC transport.
2069  * @xprt: pointer to the transport
2070  *
2071  */
2072 void xprt_put(struct rpc_xprt *xprt)
2073 {
2074 	if (xprt != NULL)
2075 		kref_put(&xprt->kref, xprt_destroy_kref);
2076 }
2077 EXPORT_SYMBOL_GPL(xprt_put);
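
/*
 * Reference-counting sketch (hypothetical helper): xprt_get() fails
 * once the last reference is gone, and the final xprt_put() triggers
 * xprt_destroy() through the kref release callback.
 */
static int example_use_xprt(struct rpc_xprt *xprt)
{
	if (xprt_get(xprt) == NULL)
		return -ENXIO;	/* transport is already being torn down */
	/* ... the transport is safe to use here ... */
	xprt_put(xprt);
	return 0;
}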
2078