/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
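
/*
 * A rough sketch of the sequence above as seen from a synchronous
 * caller (simplified; the real call chain lives in clnt.c and sched.c,
 * and error handling is omitted):
 *
 *	xprt_reserve(task);		// get a slot, or sleep on the backlog
 *	...encode the message into task->tk_rqstp->rq_snd_buf...
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);	// send; sleep on xprt->pending
 *	// wake-up comes from xprt_complete_rqst() when a reply's XID
 *	// matches, or from xprt_timer() with tk_status == -ETIMEDOUT
 *	xprt_release(task);		// give the slot back
 */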

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
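
/*
 * A minimal sketch of a registration, modelled on the in-tree socket
 * transports ("example" and example_setup() are made-up names; a real
 * module supplies its own ident from the XPRT_TRANSPORT_* identifiers):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= example_setup,	// returns a struct rpc_xprt *
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */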

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
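
/*
 * The module alias is formed by prefixing "xprt", so (for example)
 * xprt_load_transport("rdma") ends up calling request_module("xprtrdma")
 * when no transport named "rdma" has been registered yet.
 */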

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
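
/*
 * Worked example of the arithmetic above, assuming RPC_CWNDSCALE is
 * (1 << 8) = 256, i.e. one request is worth 256 window units: with
 * cwnd = 512 (two requests allowed in flight), a successful reply adds
 *
 *	(256 * 256 + (512 >> 1)) / 512 = 65792 / 512 = 128 units,
 *
 * i.e. half a request - the additive 1/cwnd growth of Van Jacobson
 * congestion avoidance. An -ETIMEDOUT result instead halves the window
 * (512 -> 256), and cwnd always stays within
 * [RPC_CWNDSCALE, RPC_MAXCWND(xprt)].
 */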

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
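
/*
 * Worked example with hypothetical linear-backoff parameters
 * (to_initval = 5s, to_increment = 5s, to_retries = 3,
 * to_exponential = 0): xprt_reset_majortimeo() puts the major timeout
 * 5 + 5 * 3 = 20 seconds into the future. Each minor timeout before
 * that deadline grows rq_timeout linearly (5s -> 10s -> 15s), capped
 * at to_maxval; once the major timeout has passed,
 * xprt_adjust_timeout() resets rq_timeout to to_initval, restarts the
 * RTT estimator ("slow start"), and returns -ETIMEDOUT so the caller
 * can decide whether to retry.
 */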

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	switch (task->tk_status) {
	case 0:
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_rqstp->rq_xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->recv_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt->recv_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	set_bit(RPC_TASK_MSG_RECV, &req->rq_task->tk_runstate);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding the xprt->recv_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	struct rpc_task *task = req->rq_task;

	clear_bit(RPC_TASK_MSG_RECV, &task->tk_runstate);
	if (test_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate))
		wake_up_bit(&task->tk_runstate, RPC_TASK_MSG_RECV);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
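
/*
 * Sketch of how a transport's receive path is expected to combine
 * xprt_lookup_rqst(), xprt_pin_rqst(), xprt_complete_rqst() and
 * xprt_unpin_rqst(), modelled on the socket transports (error handling
 * omitted):
 *
 *	spin_lock(&xprt->recv_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req) {
 *		spin_unlock(&xprt->recv_lock);
 *		return;				// no matching call: drop it
 *	}
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->recv_lock);
 *
 *	...copy the reply into req->rq_private_buf without the lock...
 *
 *	spin_lock(&xprt->recv_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->recv_lock);
 */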

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
__must_hold(&req->rq_xprt->recv_lock)
{
	struct rpc_task *task = req->rq_task;

	if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) {
		spin_unlock(&req->rq_xprt->recv_lock);
		set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
		wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV,
				TASK_UNINTERRUPTIBLE);
		clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
		spin_lock(&req->rq_xprt->recv_lock);
	}
}

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->recv_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	unsigned int connect_cookie;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			spin_lock(&xprt->recv_lock);
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock(&xprt->recv_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
	if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
		/*
		 * Sleep on the pending queue if we're expecting a reply.
		 * The spinlock ensures atomicity between the test of
		 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
		 */
		spin_lock(&xprt->recv_lock);
		if (!req->rq_reply_bytes_recvd) {
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			/*
			 * Send an extra queue wakeup call if the
			 * connection was dropped in case the call to
			 * rpc_sleep_on() raced.
			 */
			if (!xprt_connected(xprt))
				xprt_wake_pending_tasks(xprt, -ENOTCONN);
		}
		spin_unlock(&xprt->recv_lock);
	}
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
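
/*
 * Sketch of how a transport setup routine typically calls xprt_alloc()
 * (struct example_xprt and the slot counts are illustrative; the socket
 * transports pass their module-parameter slot table sizes here):
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;	// embedded, as the in-tree
 *					// transports do
 *		// ...transport-private state...
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  slot_table_size, max_slot_table_size);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */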

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt->ops->alloc_slot(xprt, task);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock(&xprt->recv_lock);
	if (!list_empty(&req->rq_list)) {
		list_del_init(&req->rq_list);
		xprt_wait_on_pinned_rqst(req);
	}
	spin_unlock(&xprt->recv_lock);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt->last_used = jiffies;
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->recv_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
1529