/*
 * linux/net/sunrpc/xprt.c
 *
 * This is a generic RPC call interface supporting congestion avoidance,
 * and asynchronous calls.
 *
 * The interface works like this:
 *
 * - When a process places a call, it allocates a request slot if
 *   one is available. Otherwise, it sleeps on the backlog queue
 *   (xprt_reserve).
 * - Next, the caller puts together the RPC message, stuffs it into
 *   the request struct, and calls xprt_transmit().
 * - xprt_transmit sends the message and installs the caller on the
 *   transport's wait list. At the same time, if a reply is expected,
 *   it installs a timer that is run after the packet's timeout has
 *   expired.
 * - When a packet arrives, the data_ready handler walks the list of
 *   pending requests for that transport. If a matching XID is found, the
 *   caller is woken up, and the timer removed.
 * - When no reply arrives within the timeout interval, the timer is
 *   fired by the kernel and runs xprt_timer(). It either adjusts the
 *   timeout values (minor timeout) or wakes up the caller with a status
 *   of -ETIMEDOUT.
 * - When the caller receives a notification from RPC that a reply arrived,
 *   it should release the RPC slot, and process the reply.
 *   If the call timed out, it may choose to retry the operation by
 *   adjusting the initial timeout value, and simply calling rpc_call
 *   again.
 *
 * Support for async RPC is done through a set of RPC-specific scheduling
 * primitives that `transparently' work for processes as well as async
 * tasks that rely on callbacks.
 *
 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
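
/*
 * Illustrative sketch (added; not part of the original file): the call
 * sequence described above, from the client's point of view. "task" is
 * the controlling rpc_task; error handling, retries and locking are
 * elided.
 *
 *	xprt_reserve(task);		// allocate a slot or sleep on backlog
 *	// ... caller encodes the message into task->tk_rqstp->rq_snd_buf ...
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);	// send, queue on xprt->recv, arm timer
 *	// ... data_ready matches the XID and calls xprt_complete_rqst() ...
 *	xprt_release(task);		// release the request slot
 */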

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
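
/*
 * Example (added; illustrative only): a transport module would typically
 * register its xprt_class from its module_init hook and unregister it on
 * exit. "example_xprt_class", the .ident value and example_setup() are
 * hypothetical.
 *
 *	static struct xprt_class example_xprt_class = {
 *		.list	= LIST_HEAD_INIT(example_xprt_class.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,
 *		.setup	= example_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_xprt_class);
 *	}
 */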

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
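
/*
 * Note (added): request_module("xprt%s", ...) above means a transport
 * registered under the name "foo" can only be demand-loaded if its module
 * declares the matching alias, e.g. MODULE_ALIAS("xprtfoo"); the alias
 * suffix and the xprt_class name must agree.
 */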

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
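
/*
 * A note on the sleep priorities above (an inference from the code, not
 * from the original comments): a task without a request slot waits at
 * RPC_PRIORITY_LOW, a task that has never transmitted at
 * RPC_PRIORITY_NORMAL, and a task that has already transmitted
 * (rq_ntrans != 0) at RPC_PRIORITY_HIGH, so that retransmissions are not
 * starved by new traffic.
 */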

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC task whose request recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
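
/*
 * Worked example (added for clarity; assumes RPC_CWNDSHIFT == 8, so one
 * "slot" of window is RPC_CWNDSCALE == 256 units): with cwnd == 512, i.e.
 * two slots, a successful reply adds (256 * 256 + 256) / 512 == 128
 * units, moving cwnd to 640 -- an additive increase of 1/cwnd slots per
 * reply. A timeout instead halves cwnd, but never below one slot
 * (RPC_CWNDSCALE), and growth is capped at RPC_MAXCWND(xprt).
 */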

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
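
/*
 * Worked example (added): if rpc_calc_rto() returns an RTO estimate of
 * HZ/2, the procedure has timed out twice before (rpc_ntimeo() == 2) and
 * one retransmission has been recorded (rq_retries == 1), the timeout set
 * above is (HZ/2) << 3 == 4 * HZ, subject to the to_maxval ceiling.
 */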

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
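
/*
 * Worked example (added): with to_initval == 10 * HZ, to_retries == 3 and
 * to_exponential set, rq_majortimeo lands (10 * HZ) << 3 == 80 seconds
 * past "jiffies"; with a linear policy and to_increment == 5 * HZ it
 * would be 10 + 3 * 5 == 25 seconds, in either case clamped to to_maxval.
 */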

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC task whose request recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
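
/*
 * Illustrative receive path (added): a sketch of how a datagram
 * transport's data_ready callback is expected to use xprt_lookup_rqst()
 * and xprt_complete_rqst() together -- compare xs_udp_data_ready() in
 * xprtsock.c. "copied" stands for however many reply bytes the transport
 * managed to pull into rq_private_buf.
 *
 *	spin_lock(&xprt->transport_lock);
 *	rovr = xprt_lookup_rqst(xprt, xid);	// match the reply's XID
 *	if (rovr != NULL) {
 *		task = rovr->rq_task;
 *		xprt_adjust_cwnd(xprt, task, copied);
 *		xprt_complete_rqst(task, copied);
 *	}
 *	spin_unlock(&xprt->transport_lock);
 */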

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
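
/*
 * A note on slot table sizing (added; restating the code above): the
 * table starts with xprt->min_reqs preallocated rpc_rqsts on xprt->free
 * and grows on demand, one GFP_NOWAIT allocation at a time, up to
 * xprt->max_reqs. -ENOMEM simply retries the allocation on a later pass,
 * while -EAGAIN means the table is at its limit and the task must wait
 * on xprt->backlog until a slot is freed.
 */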

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
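
/*
 * Example (added; illustrative only): a transport setup routine embeds
 * struct rpc_xprt at the start of its own state and sizes the slot table
 * here; compare the setup callbacks in xprtsock.c. "struct example_xprt"
 * and the two slot-table constants are hypothetical.
 *
 *	struct rpc_xprt *xprt;
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  EXAMPLE_MIN_SLOT_TABLE, EXAMPLE_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */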

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}