1 /*
2  * linux/net/sunrpc/sched.c
3  *
4  * Scheduling for synchronous and asynchronous RPC requests.
5  *
6  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
7  *
8  * TCP NFS related read + write fixes
9  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10  */
11 
12 #include <linux/module.h>
13 
14 #include <linux/sched.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/mempool.h>
18 #include <linux/smp.h>
19 #include <linux/spinlock.h>
20 #include <linux/mutex.h>
21 #include <linux/freezer.h>
22 
23 #include <linux/sunrpc/clnt.h>
24 
25 #include "sunrpc.h"
26 
27 #ifdef RPC_DEBUG
28 #define RPCDBG_FACILITY		RPCDBG_SCHED
29 #endif
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/sunrpc.h>
33 
34 /*
35  * RPC slabs and memory pools
36  */
37 #define RPC_BUFFER_MAXSIZE	(2048)
38 #define RPC_BUFFER_POOLSIZE	(8)
39 #define RPC_TASK_POOLSIZE	(8)
40 static struct kmem_cache	*rpc_task_slabp __read_mostly;
41 static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
42 static mempool_t	*rpc_task_mempool __read_mostly;
43 static mempool_t	*rpc_buffer_mempool __read_mostly;
44 
45 static void			rpc_async_schedule(struct work_struct *);
46 static void			 rpc_release_task(struct rpc_task *task);
47 static void __rpc_queue_timer_fn(unsigned long ptr);
48 
49 /*
50  * RPC tasks sit here while waiting for conditions to improve.
51  */
52 static struct rpc_wait_queue delay_queue;
53 
54 /*
55  * rpciod-related stuff
56  */
57 struct workqueue_struct *rpciod_workqueue;
58 
59 /*
60  * Disable the timer for a given RPC task. Should be called with
61  * queue->lock and bh_disabled in order to avoid races within
62  * rpc_run_timer().
63  */
64 static void
65 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
66 {
67 	if (task->tk_timeout == 0)
68 		return;
69 	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
70 	task->tk_timeout = 0;
71 	list_del(&task->u.tk_wait.timer_list);
72 	if (list_empty(&queue->timer_list.list))
73 		del_timer(&queue->timer_list.timer);
74 }
75 
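/* (Re)arm the queue's timer to fire at the given expiry time. */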
76 static void
77 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
78 {
79 	queue->timer_list.expires = expires;
80 	mod_timer(&queue->timer_list.timer, expires);
81 }
82 
83 /*
84  * Set up a timer for the current task.
85  */
86 static void
87 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
88 {
89 	if (!task->tk_timeout)
90 		return;
91 
92 	dprintk("RPC: %5u setting alarm for %lu ms\n",
93 			task->tk_pid, task->tk_timeout * 1000 / HZ);
94 
95 	task->u.tk_wait.expires = jiffies + task->tk_timeout;
96 	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
97 		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
98 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
99 }
100 
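/*
 * If the task at the head of the current priority list belongs to the
 * queue owner, move it to the tail so that other owners get a turn.
 */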
101 static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
102 {
103 	struct list_head *q = &queue->tasks[queue->priority];
104 	struct rpc_task *task;
105 
106 	if (!list_empty(q)) {
107 		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
108 		if (task->tk_owner == queue->owner)
109 			list_move_tail(&task->u.tk_wait.list, q);
110 	}
111 }
112 
113 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
114 {
115 	if (queue->priority != priority) {
116 		/* Fairness: rotate the list when changing priority */
117 		rpc_rotate_queue_owner(queue);
118 		queue->priority = priority;
119 	}
120 }
121 
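/* Record the new queue owner and grant it a fresh batch of requests. */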
122 static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
123 {
124 	queue->owner = pid;
125 	queue->nr = RPC_BATCH_COUNT;
126 }
127 
128 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
129 {
130 	rpc_set_waitqueue_priority(queue, queue->maxpriority);
131 	rpc_set_waitqueue_owner(queue, 0);
132 }
133 
134 /*
135  * Add new request to a priority queue.
136  */
137 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
138 		struct rpc_task *task,
139 		unsigned char queue_priority)
140 {
141 	struct list_head *q;
142 	struct rpc_task *t;
143 
144 	INIT_LIST_HEAD(&task->u.tk_wait.links);
145 	if (unlikely(queue_priority > queue->maxpriority))
146 		queue_priority = queue->maxpriority;
147 	if (queue_priority > queue->priority)
148 		rpc_set_waitqueue_priority(queue, queue_priority);
149 	q = &queue->tasks[queue_priority];
150 	list_for_each_entry(t, q, u.tk_wait.list) {
151 		if (t->tk_owner == task->tk_owner) {
152 			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
153 			return;
154 		}
155 	}
156 	list_add_tail(&task->u.tk_wait.list, q);
157 }
158 
159 /*
160  * Add new request to wait queue.
161  *
162  * Swapper tasks always get inserted at the head of the queue.
163  * This should avoid many nasty memory deadlocks and hopefully
164  * improve overall performance.
165  * Everyone else gets appended to the queue to ensure proper FIFO behavior.
166  */
167 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
168 		struct rpc_task *task,
169 		unsigned char queue_priority)
170 {
171 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
172 	if (RPC_IS_QUEUED(task))
173 		return;
174 
175 	if (RPC_IS_PRIORITY(queue))
176 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
177 	else if (RPC_IS_SWAPPER(task))
178 		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
179 	else
180 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
181 	task->tk_waitqueue = queue;
182 	queue->qlen++;
183 	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
184 	smp_wmb();
185 	rpc_set_queued(task);
186 
187 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
188 			task->tk_pid, queue, rpc_qname(queue));
189 }
190 
191 /*
192  * Remove request from a priority queue.
193  */
194 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
195 {
196 	struct rpc_task *t;
197 
198 	if (!list_empty(&task->u.tk_wait.links)) {
199 		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
200 		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
201 		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
202 	}
203 }
204 
205 /*
206  * Remove request from queue.
207  * Note: must be called with spin lock held.
208  */
209 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
210 {
211 	__rpc_disable_timer(queue, task);
212 	if (RPC_IS_PRIORITY(queue))
213 		__rpc_remove_wait_queue_priority(task);
214 	list_del(&task->u.tk_wait.list);
215 	queue->qlen--;
216 	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
217 			task->tk_pid, queue, rpc_qname(queue));
218 }
219 
220 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
221 {
222 	int i;
223 
224 	spin_lock_init(&queue->lock);
225 	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
226 		INIT_LIST_HEAD(&queue->tasks[i]);
227 	queue->maxpriority = nr_queues - 1;
228 	rpc_reset_waitqueue_priority(queue);
229 	queue->qlen = 0;
230 	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
231 	INIT_LIST_HEAD(&queue->timer_list.list);
232 	rpc_assign_waitqueue_name(queue, qname);
233 }
234 
235 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
236 {
237 	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
238 }
239 EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
240 
241 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
242 {
243 	__rpc_init_priority_wait_queue(queue, qname, 1);
244 }
245 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
246 
247 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
248 {
249 	del_timer_sync(&queue->timer_list.timer);
250 }
251 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
252 
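/*
 * Wait callback used with out_of_line_wait_on_bit(): abort with
 * -ERESTARTSYS on a fatal signal, otherwise sleep in a freezer-friendly state.
 */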
253 static int rpc_wait_bit_killable(void *word)
254 {
255 	if (fatal_signal_pending(current))
256 		return -ERESTARTSYS;
257 	freezable_schedule_unsafe();
258 	return 0;
259 }
260 
261 #ifdef RPC_DEBUG
262 static void rpc_task_set_debuginfo(struct rpc_task *task)
263 {
264 	static atomic_t rpc_pid;
265 
266 	task->tk_pid = atomic_inc_return(&rpc_pid);
267 }
268 #else
269 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
270 {
271 }
272 #endif
273 
274 static void rpc_set_active(struct rpc_task *task)
275 {
276 	trace_rpc_task_begin(task->tk_client, task, NULL);
277 
278 	rpc_task_set_debuginfo(task);
279 	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
280 }
281 
282 /*
283  * Mark an RPC call as having completed by clearing the 'active' bit
284  * and then waking up all tasks that were sleeping.
285  */
286 static int rpc_complete_task(struct rpc_task *task)
287 {
288 	void *m = &task->tk_runstate;
289 	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
290 	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
291 	unsigned long flags;
292 	int ret;
293 
294 	trace_rpc_task_complete(task->tk_client, task, NULL);
295 
296 	spin_lock_irqsave(&wq->lock, flags);
297 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
298 	ret = atomic_dec_and_test(&task->tk_count);
299 	if (waitqueue_active(wq))
300 		__wake_up_locked_key(wq, TASK_NORMAL, &k);
301 	spin_unlock_irqrestore(&wq->lock, flags);
302 	return ret;
303 }
304 
305 /*
306  * Allow callers to wait for completion of an RPC call
307  *
308  * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
309  * to enforce taking of the wq->lock and hence avoid races with
310  * rpc_complete_task().
311  */
312 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
313 {
314 	if (action == NULL)
315 		action = rpc_wait_bit_killable;
316 	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
317 			action, TASK_KILLABLE);
318 }
319 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
320 
321 /*
322  * Make an RPC task runnable.
323  *
324  * Note: If the task is ASYNC, and is being made runnable after sitting on an
325  * rpc_wait_queue, this must be called with the queue spinlock held to protect
326  * the wait queue operation.
327  * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
328  * which is needed to ensure that __rpc_execute() doesn't loop (due to the
329  * lockless RPC_IS_QUEUED() test) before we've had a chance to test
330  * the RPC_TASK_RUNNING flag.
331  */
332 static void rpc_make_runnable(struct rpc_task *task)
333 {
334 	bool need_wakeup = !rpc_test_and_set_running(task);
335 
336 	rpc_clear_queued(task);
337 	if (!need_wakeup)
338 		return;
339 	if (RPC_IS_ASYNC(task)) {
340 		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
341 		queue_work(rpciod_workqueue, &task->u.tk_work);
342 	} else
343 		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
344 }
345 
346 /*
347  * Prepare for sleeping on a wait queue.
348  * By always appending tasks to the list we ensure FIFO behavior.
349  * NB: An RPC task will only receive interrupt-driven events as long
350  * as it's on a wait queue.
351  */
352 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
353 		struct rpc_task *task,
354 		rpc_action action,
355 		unsigned char queue_priority)
356 {
357 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
358 			task->tk_pid, rpc_qname(q), jiffies);
359 
360 	trace_rpc_task_sleep(task->tk_client, task, q);
361 
362 	__rpc_add_wait_queue(q, task, queue_priority);
363 
364 	WARN_ON_ONCE(task->tk_callback != NULL);
365 	task->tk_callback = action;
366 	__rpc_add_timer(q, task);
367 }
368 
369 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
370 				rpc_action action)
371 {
372 	/* We shouldn't ever put an inactive task to sleep */
373 	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
374 	if (!RPC_IS_ACTIVATED(task)) {
375 		task->tk_status = -EIO;
376 		rpc_put_task_async(task);
377 		return;
378 	}
379 
380 	/*
381 	 * Protect the queue operations.
382 	 */
383 	spin_lock_bh(&q->lock);
384 	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
385 	spin_unlock_bh(&q->lock);
386 }
387 EXPORT_SYMBOL_GPL(rpc_sleep_on);
388 
389 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
390 		rpc_action action, int priority)
391 {
392 	/* We shouldn't ever put an inactive task to sleep */
393 	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
394 	if (!RPC_IS_ACTIVATED(task)) {
395 		task->tk_status = -EIO;
396 		rpc_put_task_async(task);
397 		return;
398 	}
399 
400 	/*
401 	 * Protect the queue operations.
402 	 */
403 	spin_lock_bh(&q->lock);
404 	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
405 	spin_unlock_bh(&q->lock);
406 }
407 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
408 
409 /**
410  * __rpc_do_wake_up_task - wake up a single rpc_task
411  * @queue: wait queue
412  * @task: task to be woken up
413  *
414  * Caller must hold queue->lock, and have cleared the task queued flag.
415  */
416 static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
417 {
418 	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
419 			task->tk_pid, jiffies);
420 
421 	/* Has the task been executed yet? If not, we cannot wake it up! */
422 	if (!RPC_IS_ACTIVATED(task)) {
423 		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
424 		return;
425 	}
426 
427 	trace_rpc_task_wakeup(task->tk_client, task, queue);
428 
429 	__rpc_remove_wait_queue(queue, task);
430 
431 	rpc_make_runnable(task);
432 
433 	dprintk("RPC:       __rpc_wake_up_task done\n");
434 }
435 
436 /*
437  * Wake up a queued task while the queue lock is being held
438  */
439 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
440 {
441 	if (RPC_IS_QUEUED(task)) {
442 		smp_rmb();
443 		if (task->tk_waitqueue == queue)
444 			__rpc_do_wake_up_task(queue, task);
445 	}
446 }
447 
448 /*
449  * Tests whether rpc queue is empty
450  */
451 int rpc_queue_empty(struct rpc_wait_queue *queue)
452 {
453 	int res;
454 
455 	spin_lock_bh(&queue->lock);
456 	res = queue->qlen;
457 	spin_unlock_bh(&queue->lock);
458 	return res == 0;
459 }
460 EXPORT_SYMBOL_GPL(rpc_queue_empty);
461 
462 /*
463  * Wake up a task on a specific queue
464  */
465 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
466 {
467 	spin_lock_bh(&queue->lock);
468 	rpc_wake_up_task_queue_locked(queue, task);
469 	spin_unlock_bh(&queue->lock);
470 }
471 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
472 
473 /*
474  * Wake up the next task on a priority queue.
475  */
476 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
477 {
478 	struct list_head *q;
479 	struct rpc_task *task;
480 
481 	/*
482 	 * Service a batch of tasks from a single owner.
483 	 */
484 	q = &queue->tasks[queue->priority];
485 	if (!list_empty(q)) {
486 		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
487 		if (queue->owner == task->tk_owner) {
488 			if (--queue->nr)
489 				goto out;
490 			list_move_tail(&task->u.tk_wait.list, q);
491 		}
492 		/*
493 		 * Check if we need to switch queues.
494 		 */
495 		goto new_owner;
496 	}
497 
498 	/*
499 	 * Service the next queue.
500 	 */
501 	do {
502 		if (q == &queue->tasks[0])
503 			q = &queue->tasks[queue->maxpriority];
504 		else
505 			q = q - 1;
506 		if (!list_empty(q)) {
507 			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
508 			goto new_queue;
509 		}
510 	} while (q != &queue->tasks[queue->priority]);
511 
512 	rpc_reset_waitqueue_priority(queue);
513 	return NULL;
514 
515 new_queue:
516 	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
517 new_owner:
518 	rpc_set_waitqueue_owner(queue, task->tk_owner);
519 out:
520 	return task;
521 }
522 
523 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
524 {
525 	if (RPC_IS_PRIORITY(queue))
526 		return __rpc_find_next_queued_priority(queue);
527 	if (!list_empty(&queue->tasks[0]))
528 		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
529 	return NULL;
530 }
531 
532 /*
533  * Wake up the first task on the wait queue.
534  */
535 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
536 		bool (*func)(struct rpc_task *, void *), void *data)
537 {
538 	struct rpc_task	*task = NULL;
539 
540 	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
541 			queue, rpc_qname(queue));
542 	spin_lock_bh(&queue->lock);
543 	task = __rpc_find_next_queued(queue);
544 	if (task != NULL) {
545 		if (func(task, data))
546 			rpc_wake_up_task_queue_locked(queue, task);
547 		else
548 			task = NULL;
549 	}
550 	spin_unlock_bh(&queue->lock);
551 
552 	return task;
553 }
554 EXPORT_SYMBOL_GPL(rpc_wake_up_first);
555 
556 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
557 {
558 	return true;
559 }
560 
561 /*
562  * Wake up the next task on the wait queue.
563 */
564 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
565 {
566 	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
567 }
568 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
569 
570 /**
571  * rpc_wake_up - wake up all rpc_tasks
572  * @queue: rpc_wait_queue on which the tasks are sleeping
573  *
574  * Grabs queue->lock
575  */
576 void rpc_wake_up(struct rpc_wait_queue *queue)
577 {
578 	struct list_head *head;
579 
580 	spin_lock_bh(&queue->lock);
581 	head = &queue->tasks[queue->maxpriority];
582 	for (;;) {
583 		while (!list_empty(head)) {
584 			struct rpc_task *task;
585 			task = list_first_entry(head,
586 					struct rpc_task,
587 					u.tk_wait.list);
588 			rpc_wake_up_task_queue_locked(queue, task);
589 		}
590 		if (head == &queue->tasks[0])
591 			break;
592 		head--;
593 	}
594 	spin_unlock_bh(&queue->lock);
595 }
596 EXPORT_SYMBOL_GPL(rpc_wake_up);
597 
598 /**
599  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
600  * @queue: rpc_wait_queue on which the tasks are sleeping
601  * @status: status value to set
602  *
603  * Grabs queue->lock
604  */
605 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
606 {
607 	struct list_head *head;
608 
609 	spin_lock_bh(&queue->lock);
610 	head = &queue->tasks[queue->maxpriority];
611 	for (;;) {
612 		while (!list_empty(head)) {
613 			struct rpc_task *task;
614 			task = list_first_entry(head,
615 					struct rpc_task,
616 					u.tk_wait.list);
617 			task->tk_status = status;
618 			rpc_wake_up_task_queue_locked(queue, task);
619 		}
620 		if (head == &queue->tasks[0])
621 			break;
622 		head--;
623 	}
624 	spin_unlock_bh(&queue->lock);
625 }
626 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
627 
628 static void __rpc_queue_timer_fn(unsigned long ptr)
629 {
630 	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
631 	struct rpc_task *task, *n;
632 	unsigned long expires, now, timeo;
633 
634 	spin_lock(&queue->lock);
635 	expires = now = jiffies;
636 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
637 		timeo = task->u.tk_wait.expires;
638 		if (time_after_eq(now, timeo)) {
639 			dprintk("RPC: %5u timeout\n", task->tk_pid);
640 			task->tk_status = -ETIMEDOUT;
641 			rpc_wake_up_task_queue_locked(queue, task);
642 			continue;
643 		}
644 		if (expires == now || time_after(expires, timeo))
645 			expires = timeo;
646 	}
647 	if (!list_empty(&queue->timer_list.list))
648 		rpc_set_queue_timer(queue, expires);
649 	spin_unlock(&queue->lock);
650 }
651 
652 static void __rpc_atrun(struct rpc_task *task)
653 {
654 	task->tk_status = 0;
655 }
656 
657 /*
658  * Run a task at a later time
659  */
660 void rpc_delay(struct rpc_task *task, unsigned long delay)
661 {
662 	task->tk_timeout = delay;
663 	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
664 }
665 EXPORT_SYMBOL_GPL(rpc_delay);
666 
667 /*
668  * Helper to call task->tk_ops->rpc_call_prepare
669  */
670 void rpc_prepare_task(struct rpc_task *task)
671 {
672 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
673 }
674 
675 static void
676 rpc_init_task_statistics(struct rpc_task *task)
677 {
678 	/* Initialize retry counters */
679 	task->tk_garb_retry = 2;
680 	task->tk_cred_retry = 2;
681 	task->tk_rebind_retry = 2;
682 
683 	/* starting timestamp */
684 	task->tk_start = ktime_get();
685 }
686 
687 static void
688 rpc_reset_task_statistics(struct rpc_task *task)
689 {
690 	task->tk_timeouts = 0;
691 	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
692 
693 	rpc_init_task_statistics(task);
694 }
695 
696 /*
697  * Helper that calls task->tk_ops->rpc_call_done if it exists
698  */
699 void rpc_exit_task(struct rpc_task *task)
700 {
701 	task->tk_action = NULL;
702 	if (task->tk_ops->rpc_call_done != NULL) {
703 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
704 		if (task->tk_action != NULL) {
705 			WARN_ON(RPC_ASSASSINATED(task));
706 			/* Always release the RPC slot and buffer memory */
707 			xprt_release(task);
708 			rpc_reset_task_statistics(task);
709 		}
710 	}
711 }
712 
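/*
 * Terminate a task: record the exit status, point tk_action at
 * rpc_exit_task(), and wake the task if it is currently queued.
 */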
713 void rpc_exit(struct rpc_task *task, int status)
714 {
715 	task->tk_status = status;
716 	task->tk_action = rpc_exit_task;
717 	if (RPC_IS_QUEUED(task))
718 		rpc_wake_up_queued_task(task->tk_waitqueue, task);
719 }
720 EXPORT_SYMBOL_GPL(rpc_exit);
721 
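/* Invoke the ->rpc_release() callback, if the caller supplied one. */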
722 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
723 {
724 	if (ops->rpc_release != NULL)
725 		ops->rpc_release(calldata);
726 }
727 
728 /*
729  * This is the RPC `scheduler' (or rather, the finite state machine).
730  */
731 static void __rpc_execute(struct rpc_task *task)
732 {
733 	struct rpc_wait_queue *queue;
734 	int task_is_async = RPC_IS_ASYNC(task);
735 	int status = 0;
736 
737 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
738 			task->tk_pid, task->tk_flags);
739 
740 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
741 	if (RPC_IS_QUEUED(task))
742 		return;
743 
744 	for (;;) {
745 		void (*do_action)(struct rpc_task *);
746 
747 		/*
748 		 * Execute any pending callback first.
749 		 */
750 		do_action = task->tk_callback;
751 		task->tk_callback = NULL;
752 		if (do_action == NULL) {
753 			/*
754 			 * Perform the next FSM step.
755 			 * tk_action may be NULL if the task has been killed.
756 			 * In particular, note that rpc_killall_tasks may
757 			 * do this at any time, so beware when dereferencing.
758 			 */
759 			do_action = task->tk_action;
760 			if (do_action == NULL)
761 				break;
762 		}
763 		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
764 		do_action(task);
765 
766 		/*
767 		 * Lockless check for whether task is sleeping or not.
768 		 */
769 		if (!RPC_IS_QUEUED(task))
770 			continue;
771 		/*
772 		 * The queue->lock protects against races with
773 		 * rpc_make_runnable().
774 		 *
775 		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
776 		 * rpc_task, rpc_make_runnable() can assign it to a
777 		 * different workqueue. We therefore cannot assume that the
778 		 * rpc_task pointer may still be dereferenced.
779 		 */
780 		queue = task->tk_waitqueue;
781 		spin_lock_bh(&queue->lock);
782 		if (!RPC_IS_QUEUED(task)) {
783 			spin_unlock_bh(&queue->lock);
784 			continue;
785 		}
786 		rpc_clear_running(task);
787 		spin_unlock_bh(&queue->lock);
788 		if (task_is_async)
789 			return;
790 
791 		/* sync task: sleep here */
792 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
793 		status = out_of_line_wait_on_bit(&task->tk_runstate,
794 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
795 				TASK_KILLABLE);
796 		if (status == -ERESTARTSYS) {
797 			/*
798 			 * When a sync task receives a signal, it exits with
799 			 * -ERESTARTSYS. In order to catch any callbacks that
800 			 * clean up after sleeping on some queue, we don't
801 			 * break the loop here, but go around once more.
802 			 */
803 			dprintk("RPC: %5u got signal\n", task->tk_pid);
804 			task->tk_flags |= RPC_TASK_KILLED;
805 			rpc_exit(task, -ERESTARTSYS);
806 		}
807 		rpc_set_running(task);
808 		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
809 	}
810 
811 	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
812 			task->tk_status);
813 	/* Release all resources associated with the task */
814 	rpc_release_task(task);
815 }
816 
817 /*
818  * User-visible entry point to the scheduler.
819  *
820  * This may be called recursively if e.g. an async NFS task updates
821  * the attributes and finds that dirty pages must be flushed.
822  * NOTE: Upon exit of this function the task is guaranteed to be
823  *	 released. In particular note that tk_release() will have
824  *	 been called, so your task memory may have been freed.
825  */
826 void rpc_execute(struct rpc_task *task)
827 {
828 	rpc_set_active(task);
829 	rpc_make_runnable(task);
830 	if (!RPC_IS_ASYNC(task))
831 		__rpc_execute(task);
832 }
833 
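/*
 * Workqueue callback that runs an asynchronous task's state machine;
 * PF_FSTRANS is set for the duration of the call.
 */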
834 static void rpc_async_schedule(struct work_struct *work)
835 {
836 	current->flags |= PF_FSTRANS;
837 	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
838 	current->flags &= ~PF_FSTRANS;
839 }
840 
841 /**
842  * rpc_malloc - allocate an RPC buffer
843  * @task: RPC task that will use this buffer
844  * @size: requested byte size
845  *
846  * To prevent rpciod from hanging, this allocator never sleeps,
847  * returning NULL if the request cannot be serviced immediately.
848  * The caller can arrange to sleep in a way that is safe for rpciod.
849  *
850  * Most requests are 'small' (under 2KiB) and can be serviced from a
851  * mempool, ensuring that NFS reads and writes can always proceed,
852  * and that there is good locality of reference for these buffers.
853  *
854  * In order to avoid memory starvation triggering more writebacks of
855  * NFS requests, we avoid using GFP_KERNEL.
856  */
857 void *rpc_malloc(struct rpc_task *task, size_t size)
858 {
859 	struct rpc_buffer *buf;
860 	gfp_t gfp = GFP_NOWAIT;
861 
862 	if (RPC_IS_SWAPPER(task))
863 		gfp |= __GFP_MEMALLOC;
864 
865 	size += sizeof(struct rpc_buffer);
866 	if (size <= RPC_BUFFER_MAXSIZE)
867 		buf = mempool_alloc(rpc_buffer_mempool, gfp);
868 	else
869 		buf = kmalloc(size, gfp);
870 
871 	if (!buf)
872 		return NULL;
873 
874 	buf->len = size;
875 	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
876 			task->tk_pid, size, buf);
877 	return &buf->data;
878 }
879 EXPORT_SYMBOL_GPL(rpc_malloc);
880 
881 /**
882  * rpc_free - free buffer allocated via rpc_malloc
883  * @buffer: buffer to free
884  *
885  */
886 void rpc_free(void *buffer)
887 {
888 	size_t size;
889 	struct rpc_buffer *buf;
890 
891 	if (!buffer)
892 		return;
893 
894 	buf = container_of(buffer, struct rpc_buffer, data);
895 	size = buf->len;
896 
897 	dprintk("RPC:       freeing buffer of size %zu at %p\n",
898 			size, buf);
899 
900 	if (size <= RPC_BUFFER_MAXSIZE)
901 		mempool_free(buf, rpc_buffer_mempool);
902 	else
903 		kfree(buf);
904 }
905 EXPORT_SYMBOL_GPL(rpc_free);
906 
907 /*
908  * Creation and deletion of RPC task structures
909  */
910 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
911 {
912 	memset(task, 0, sizeof(*task));
913 	atomic_set(&task->tk_count, 1);
914 	task->tk_flags  = task_setup_data->flags;
915 	task->tk_ops = task_setup_data->callback_ops;
916 	task->tk_calldata = task_setup_data->callback_data;
917 	INIT_LIST_HEAD(&task->tk_task);
918 
919 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
920 	task->tk_owner = current->tgid;
921 
922 	/* Initialize workqueue for async tasks */
923 	task->tk_workqueue = task_setup_data->workqueue;
924 
925 	if (task->tk_ops->rpc_call_prepare != NULL)
926 		task->tk_action = rpc_prepare_task;
927 
928 	rpc_init_task_statistics(task);
929 
930 	dprintk("RPC:       new task initialized, procpid %u\n",
931 				task_pid_nr(current));
932 }
933 
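/* Allocate a dynamically created rpc_task from the task mempool (GFP_NOIO). */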
934 static struct rpc_task *
935 rpc_alloc_task(void)
936 {
937 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
938 }
939 
940 /*
941  * Create a new task for the specified client.
942  */
943 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
944 {
945 	struct rpc_task	*task = setup_data->task;
946 	unsigned short flags = 0;
947 
948 	if (task == NULL) {
949 		task = rpc_alloc_task();
950 		if (task == NULL) {
951 			rpc_release_calldata(setup_data->callback_ops,
952 					setup_data->callback_data);
953 			return ERR_PTR(-ENOMEM);
954 		}
955 		flags = RPC_TASK_DYNAMIC;
956 	}
957 
958 	rpc_init_task(task, setup_data);
959 	task->tk_flags |= flags;
960 	dprintk("RPC:       allocated task %p\n", task);
961 	return task;
962 }
963 
964 /*
965  * rpc_free_task - release rpc task and perform cleanups
966  *
967  * Note that we free up the rpc_task _after_ rpc_release_calldata()
968  * in order to work around a workqueue dependency issue.
969  *
970  * Tejun Heo states:
971  * "Workqueue currently considers two work items to be the same if they're
972  * on the same address and won't execute them concurrently - ie. it
973  * makes a work item which is queued again while being executed wait
974  * for the previous execution to complete.
975  *
976  * If a work function frees the work item, and then waits for an event
977  * which should be performed by another work item and *that* work item
978  * recycles the freed work item, it can create a false dependency loop.
979  * There really is no reliable way to detect this short of verifying
980  * every memory free."
981  *
982  */
983 static void rpc_free_task(struct rpc_task *task)
984 {
985 	unsigned short tk_flags = task->tk_flags;
986 
987 	rpc_release_calldata(task->tk_ops, task->tk_calldata);
988 
989 	if (tk_flags & RPC_TASK_DYNAMIC) {
990 		dprintk("RPC: %5u freeing task\n", task->tk_pid);
991 		mempool_free(task, rpc_task_mempool);
992 	}
993 }
994 
995 static void rpc_async_release(struct work_struct *work)
996 {
997 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
998 }
999 
1000 static void rpc_release_resources_task(struct rpc_task *task)
1001 {
1002 	xprt_release(task);
1003 	if (task->tk_msg.rpc_cred) {
1004 		put_rpccred(task->tk_msg.rpc_cred);
1005 		task->tk_msg.rpc_cred = NULL;
1006 	}
1007 	rpc_task_release_client(task);
1008 }
1009 
1010 static void rpc_final_put_task(struct rpc_task *task,
1011 		struct workqueue_struct *q)
1012 {
1013 	if (q != NULL) {
1014 		INIT_WORK(&task->u.tk_work, rpc_async_release);
1015 		queue_work(q, &task->u.tk_work);
1016 	} else
1017 		rpc_free_task(task);
1018 }
1019 
1020 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1021 {
1022 	if (atomic_dec_and_test(&task->tk_count)) {
1023 		rpc_release_resources_task(task);
1024 		rpc_final_put_task(task, q);
1025 	}
1026 }
1027 
1028 void rpc_put_task(struct rpc_task *task)
1029 {
1030 	rpc_do_put_task(task, NULL);
1031 }
1032 EXPORT_SYMBOL_GPL(rpc_put_task);
1033 
1034 void rpc_put_task_async(struct rpc_task *task)
1035 {
1036 	rpc_do_put_task(task, task->tk_workqueue);
1037 }
1038 EXPORT_SYMBOL_GPL(rpc_put_task_async);
1039 
1040 static void rpc_release_task(struct rpc_task *task)
1041 {
1042 	dprintk("RPC: %5u release task\n", task->tk_pid);
1043 
1044 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
1045 
1046 	rpc_release_resources_task(task);
1047 
1048 	/*
1049 	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1050 	 * so it should be safe to use task->tk_count as a test for whether
1051 	 * or not any other processes still hold references to our rpc_task.
1052 	 */
1053 	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1054 		/* Wake up anyone who may be waiting for task completion */
1055 		if (!rpc_complete_task(task))
1056 			return;
1057 	} else {
1058 		if (!atomic_dec_and_test(&task->tk_count))
1059 			return;
1060 	}
1061 	rpc_final_put_task(task, task->tk_workqueue);
1062 }
1063 
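/*
 * rpciod_up()/rpciod_down() pin and release a reference on this module
 * on behalf of rpciod users.
 */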
1064 int rpciod_up(void)
1065 {
1066 	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1067 }
1068 
1069 void rpciod_down(void)
1070 {
1071 	module_put(THIS_MODULE);
1072 }
1073 
1074 /*
1075  * Start up the rpciod workqueue.
1076  */
1077 static int rpciod_start(void)
1078 {
1079 	struct workqueue_struct *wq;
1080 
1081 	/*
1082 	 * Create the rpciod thread and wait for it to start.
1083 	 */
1084 	dprintk("RPC:       creating workqueue rpciod\n");
1085 	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1);
1086 	rpciod_workqueue = wq;
1087 	return rpciod_workqueue != NULL;
1088 }
1089 
1090 static void rpciod_stop(void)
1091 {
1092 	struct workqueue_struct *wq = NULL;
1093 
1094 	if (rpciod_workqueue == NULL)
1095 		return;
1096 	dprintk("RPC:       destroying workqueue rpciod\n");
1097 
1098 	wq = rpciod_workqueue;
1099 	rpciod_workqueue = NULL;
1100 	destroy_workqueue(wq);
1101 }
1102 
1103 void
1104 rpc_destroy_mempool(void)
1105 {
1106 	rpciod_stop();
1107 	if (rpc_buffer_mempool)
1108 		mempool_destroy(rpc_buffer_mempool);
1109 	if (rpc_task_mempool)
1110 		mempool_destroy(rpc_task_mempool);
1111 	if (rpc_task_slabp)
1112 		kmem_cache_destroy(rpc_task_slabp);
1113 	if (rpc_buffer_slabp)
1114 		kmem_cache_destroy(rpc_buffer_slabp);
1115 	rpc_destroy_wait_queue(&delay_queue);
1116 }
1117 
1118 int
1119 rpc_init_mempool(void)
1120 {
1121 	/*
1122 	 * The following is not strictly a mempool initialisation,
1123 	 * but there is no harm in doing it here
1124 	 */
1125 	rpc_init_wait_queue(&delay_queue, "delayq");
1126 	if (!rpciod_start())
1127 		goto err_nomem;
1128 
1129 	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1130 					     sizeof(struct rpc_task),
1131 					     0, SLAB_HWCACHE_ALIGN,
1132 					     NULL);
1133 	if (!rpc_task_slabp)
1134 		goto err_nomem;
1135 	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1136 					     RPC_BUFFER_MAXSIZE,
1137 					     0, SLAB_HWCACHE_ALIGN,
1138 					     NULL);
1139 	if (!rpc_buffer_slabp)
1140 		goto err_nomem;
1141 	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1142 						    rpc_task_slabp);
1143 	if (!rpc_task_mempool)
1144 		goto err_nomem;
1145 	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1146 						      rpc_buffer_slabp);
1147 	if (!rpc_buffer_mempool)
1148 		goto err_nomem;
1149 	return 0;
1150 err_nomem:
1151 	rpc_destroy_mempool();
1152 	return -ENOMEM;
1153 }
1154