/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
#ifndef CONFIG_RCU_NOCB_CPU_NONE
	if (!have_rcu_nocb_mask) {
		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
		have_rcu_nocb_mask = true;
	}
#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tExperimental no-CBs CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tExperimental no-CBs for all CPUs\n");
	cpumask_setall(rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
	if (have_rcu_nocb_mask) {
		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
		pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
		if (rcu_nocb_poll)
			pr_info("\tExperimental polled no-CBs CPUs.\n");
	}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
	RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case root is being boosted and leaf was not.  Make sure
	 * that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
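
/*
 * Illustrative sketch, not part of this file: a typical call_rcu()
 * user embeds a struct rcu_head in its own structure and frees the
 * enclosing object from the callback once a grace period has elapsed.
 * The structure and functions below are hypothetical, named here only
 * for the example:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);	// Safe: pre-existing readers are done.
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		// Unlink fp from all reader-visible structures, then:
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */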

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
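
/*
 * Illustrative sketch, not part of this file: callers do not invoke
 * kfree_call_rcu() directly.  They use the kfree_rcu() macro, which
 * computes the offset of the rcu_head within the enclosing structure
 * and reaches this function via __kfree_rcu().  Assuming the same
 * hypothetical struct foo as in the call_rcu() example above:
 *
 *	kfree_rcu(fp, rcu);	// Like call_rcu() + kfree(fp), but with
 *				// no separate callback function needed.
 */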

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
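
/*
 * Illustrative sketch, not part of this file: the classic update-side
 * pattern pairs rcu_assign_pointer() with synchronize_rcu().  The
 * pointer gp, its lock gp_lock, and struct foo are hypothetical:
 *
 *	struct foo *old = rcu_dereference_protected(gp,
 *					lockdep_is_held(&gp_lock));
 *
 *	rcu_assign_pointer(gp, new);	// Publish the new version.
 *	synchronize_rcu();		// Wait for pre-existing readers.
 *	kfree(old);			// Now safe to reclaim the old one.
 */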

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
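
/*
 * Illustrative sketch, not part of this file: the usual rcu_barrier()
 * caller is a module-exit function.  A module whose RCU callbacks live
 * in module text must wait for all in-flight callbacks to be invoked
 * before its code and data vanish (function name hypothetical):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// ...stop posting new callbacks first, then:
 *		rcu_barrier();	// All queued callbacks have now run,
 *				// so the module may safely be unloaded.
 *	}
 */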

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
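
/*
 * Illustrative sketch, not part of this file, of the proxy-lock trick
 * used by rcu_boost() above, for a hypothetical low-priority reader t:
 *
 *	rt_mutex_init_proxy_locked(&mtx, t);	// mtx now "held" by t.
 *	t->rcu_boost_mutex = &mtx;		// t will unlock it later.
 *	rt_mutex_lock(&mtx);	// We block on mtx, so priority
 *				// inheritance boosts t until t releases
 *				// mtx in its outermost rcu_read_unlock().
 *	rt_mutex_unlock(&mtx);	// Runs only after t has dropped mtx.
 */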

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End boost kthread@rcu_yield");
			schedule_timeout_interruptible(2);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
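
/*
 * Worked example with assumed values, not taken from any particular
 * config: with HZ=1000 and CONFIG_RCU_BOOST_DELAY=500 (milliseconds),
 * RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 1000, 1000) = 500
 * jiffies, so boosting begins roughly half a second into the grace
 * period.
 */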
1316 
1317 /*
1318  * Do priority-boost accounting for the start of a new grace period.
1319  */
rcu_preempt_boost_start_gp(struct rcu_node * rnp)1320 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1321 {
1322 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1323 }
1324 
1325 /*
1326  * Create an RCU-boost kthread for the specified node if one does not
1327  * already exist.  We only create this kthread for preemptible RCU.
1328  * Returns zero if all is well, a negated errno otherwise.
1329  */
rcu_spawn_one_boost_kthread(struct rcu_state * rsp,struct rcu_node * rnp)1330 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1331 						 struct rcu_node *rnp)
1332 {
1333 	int rnp_index = rnp - &rsp->node[0];
1334 	unsigned long flags;
1335 	struct sched_param sp;
1336 	struct task_struct *t;
1337 
1338 	if (&rcu_preempt_state != rsp)
1339 		return 0;
1340 
1341 	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1342 		return 0;
1343 
1344 	rsp->boost = 1;
1345 	if (rnp->boost_kthread_task != NULL)
1346 		return 0;
1347 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1348 			   "rcub/%d", rnp_index);
1349 	if (IS_ERR(t))
1350 		return PTR_ERR(t);
1351 	raw_spin_lock_irqsave(&rnp->lock, flags);
1352 	rnp->boost_kthread_task = t;
1353 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1354 	sp.sched_priority = RCU_BOOST_PRIO;
1355 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1356 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1357 	return 0;
1358 }
1359 
rcu_kthread_do_work(void)1360 static void rcu_kthread_do_work(void)
1361 {
1362 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1363 	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1364 	rcu_preempt_do_callbacks();
1365 }
1366 
rcu_cpu_kthread_setup(unsigned int cpu)1367 static void rcu_cpu_kthread_setup(unsigned int cpu)
1368 {
1369 	struct sched_param sp;
1370 
1371 	sp.sched_priority = RCU_KTHREAD_PRIO;
1372 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1373 }
1374 
rcu_cpu_kthread_park(unsigned int cpu)1375 static void rcu_cpu_kthread_park(unsigned int cpu)
1376 {
1377 	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1378 }
1379 
rcu_cpu_kthread_should_run(unsigned int cpu)1380 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1381 {
1382 	return __get_cpu_var(rcu_cpu_has_work);
1383 }
1384 
1385 /*
1386  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1387  * RCU softirq used in flavors and configurations of RCU that do not
1388  * support RCU priority boosting.
1389  */
rcu_cpu_kthread(unsigned int cpu)1390 static void rcu_cpu_kthread(unsigned int cpu)
1391 {
1392 	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
1393 	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
1394 	int spincnt;
1395 
1396 	for (spincnt = 0; spincnt < 10; spincnt++) {
1397 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
1398 		local_bh_disable();
1399 		*statusp = RCU_KTHREAD_RUNNING;
1400 		this_cpu_inc(rcu_cpu_kthread_loops);
1401 		local_irq_disable();
1402 		work = *workp;
1403 		*workp = 0;
1404 		local_irq_enable();
1405 		if (work)
1406 			rcu_kthread_do_work();
1407 		local_bh_enable();
1408 		if (*workp == 0) {
1409 			trace_rcu_utilization("End CPU kthread@rcu_wait");
1410 			*statusp = RCU_KTHREAD_WAITING;
1411 			return;
1412 		}
1413 	}
1414 	*statusp = RCU_KTHREAD_YIELDING;
1415 	trace_rcu_utilization("Start CPU kthread@rcu_yield");
1416 	schedule_timeout_interruptible(2);
1417 	trace_rcu_utilization("End CPU kthread@rcu_yield");
1418 	*statusp = RCU_KTHREAD_WAITING;
1419 }
1420 
1421 /*
1422  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1423  * served by the rcu_node in question.  The CPU hotplug lock is still
1424  * held, so the value of rnp->qsmaskinit will be stable.
1425  *
1426  * We don't include outgoingcpu in the affinity set, use -1 if there is
1427  * no outgoing CPU.  If there are no CPUs left in the affinity set,
1428  * this function allows the kthread to execute on any CPU.
1429  */
rcu_boost_kthread_setaffinity(struct rcu_node * rnp,int outgoingcpu)1430 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1431 {
1432 	struct task_struct *t = rnp->boost_kthread_task;
1433 	unsigned long mask = rnp->qsmaskinit;
1434 	cpumask_var_t cm;
1435 	int cpu;
1436 
1437 	if (!t)
1438 		return;
1439 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1440 		return;
1441 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1442 		if ((mask & 0x1) && cpu != outgoingcpu)
1443 			cpumask_set_cpu(cpu, cm);
1444 	if (cpumask_weight(cm) == 0) {
1445 		cpumask_setall(cm);
1446 		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1447 			cpumask_clear_cpu(cpu, cm);
1448 		WARN_ON_ONCE(cpumask_weight(cm) == 0);
1449 	}
1450 	set_cpus_allowed_ptr(t, cm);
1451 	free_cpumask_var(cm);
1452 }
1453 
1454 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1455 	.store			= &rcu_cpu_kthread_task,
1456 	.thread_should_run	= rcu_cpu_kthread_should_run,
1457 	.thread_fn		= rcu_cpu_kthread,
1458 	.thread_comm		= "rcuc/%u",
1459 	.setup			= rcu_cpu_kthread_setup,
1460 	.park			= rcu_cpu_kthread_park,
1461 };
1462 
1463 /*
1464  * Spawn all kthreads -- called as soon as the scheduler is running.
1465  */
rcu_spawn_kthreads(void)1466 static int __init rcu_spawn_kthreads(void)
1467 {
1468 	struct rcu_node *rnp;
1469 	int cpu;
1470 
1471 	rcu_scheduler_fully_active = 1;
1472 	for_each_possible_cpu(cpu)
1473 		per_cpu(rcu_cpu_has_work, cpu) = 0;
1474 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1475 	rnp = rcu_get_root(rcu_state);
1476 	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1477 	if (NUM_RCU_NODES > 1) {
1478 		rcu_for_each_leaf_node(rcu_state, rnp)
1479 			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1480 	}
1481 	return 0;
1482 }
1483 early_initcall(rcu_spawn_kthreads);
1484 
rcu_prepare_kthreads(int cpu)1485 static void __cpuinit rcu_prepare_kthreads(int cpu)
1486 {
1487 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1488 	struct rcu_node *rnp = rdp->mynode;
1489 
1490 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1491 	if (rcu_scheduler_fully_active)
1492 		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1493 }
1494 
1495 #else /* #ifdef CONFIG_RCU_BOOST */
1496 
rcu_initiate_boost(struct rcu_node * rnp,unsigned long flags)1497 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1498 {
1499 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1500 }
1501 
invoke_rcu_callbacks_kthread(void)1502 static void invoke_rcu_callbacks_kthread(void)
1503 {
1504 	WARN_ON_ONCE(1);
1505 }
1506 
rcu_is_callbacks_kthread(void)1507 static bool rcu_is_callbacks_kthread(void)
1508 {
1509 	return false;
1510 }
1511 
rcu_preempt_boost_start_gp(struct rcu_node * rnp)1512 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1513 {
1514 }
1515 
rcu_boost_kthread_setaffinity(struct rcu_node * rnp,int outgoingcpu)1516 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1517 {
1518 }
1519 
rcu_scheduler_really_started(void)1520 static int __init rcu_scheduler_really_started(void)
1521 {
1522 	rcu_scheduler_fully_active = 1;
1523 	return 0;
1524 }
1525 early_initcall(rcu_scheduler_really_started);
1526 
rcu_prepare_kthreads(int cpu)1527 static void __cpuinit rcu_prepare_kthreads(int cpu)
1528 {
1529 }
1530 
1531 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1532 
1533 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1534 
1535 /*
1536  * Check to see if any future RCU-related work will need to be done
1537  * by the current CPU, even if none need be done immediately, returning
1538  * 1 if so.  This function is part of the RCU implementation; it is -not-
1539  * an exported member of the RCU API.
1540  *
1541  * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
1542  * any flavor of RCU.
1543  */
rcu_needs_cpu(int cpu,unsigned long * delta_jiffies)1544 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1545 {
1546 	*delta_jiffies = ULONG_MAX;
1547 	return rcu_cpu_has_callbacks(cpu, NULL);
1548 }
1549 
1550 /*
1551  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1552  * after it.
1553  */
rcu_cleanup_after_idle(int cpu)1554 static void rcu_cleanup_after_idle(int cpu)
1555 {
1556 }
1557 
1558 /*
1559  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1560  * is nothing.
1561  */
1562 static void rcu_prepare_for_idle(int cpu)
1563 {
1564 }
1565 
1566 /*
1567  * Don't bother keeping a running count of the number of RCU callbacks
1568  * posted because CONFIG_RCU_FAST_NO_HZ=n.
1569  */
1570 static void rcu_idle_count_callbacks_posted(void)
1571 {
1572 }
1573 
1574 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1575 
1576 /*
1577  * This code is invoked when a CPU goes idle, at which point we want
1578  * to have the CPU do everything required for RCU so that it can enter
1579  * the energy-efficient dyntick-idle mode.  This is handled by a
1580  * state machine implemented by rcu_prepare_for_idle() below.
1581  *
1582  * The following two preprocessor symbols control this state machine:
1583  *
1584  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1585  *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
1586  *	is sized to be roughly one RCU grace period.  Those energy-efficiency
1587  *	benchmarkers who might otherwise be tempted to set this to a large
1588  *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1589  *	system.  And if you are -that- concerned about energy efficiency,
1590  *	just power the system down and be done with it!
1591  * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1592  *	permitted to sleep in dyntick-idle mode with only lazy RCU
1593  *	callbacks pending.  Setting this too high can OOM your system.
1594  *
1595  * The values below work well in practice.  If future workloads require
1596  * adjustment, they can be converted into kernel config parameters, though
1597  * making the state machine smarter might be a better option.
1598  */
1599 #define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
1600 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
1601 
1602 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1603 module_param(rcu_idle_gp_delay, int, 0644);
1604 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1605 module_param(rcu_idle_lazy_gp_delay, int, 0644);
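
/*
 * Both delays above are runtime-tunable (permissions 0644); on a typical
 * build they appear under /sys/module/rcutree/parameters/ (the exact path
 * depends on the object this file is built into), for example:
 *
 *	echo 8 > /sys/module/rcutree/parameters/rcu_idle_gp_delay
 */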
1606 
1607 extern int tick_nohz_enabled;
1608 
1609 /*
1610  * Try to advance callbacks for all flavors of RCU on the current CPU.
1611  * Afterwards, if there are any callbacks ready for immediate invocation,
1612  * return true.
1613  */
1614 static bool rcu_try_advance_all_cbs(void)
1615 {
1616 	bool cbs_ready = false;
1617 	struct rcu_data *rdp;
1618 	struct rcu_node *rnp;
1619 	struct rcu_state *rsp;
1620 
1621 	for_each_rcu_flavor(rsp) {
1622 		rdp = this_cpu_ptr(rsp->rda);
1623 		rnp = rdp->mynode;
1624 
1625 		/*
1626 		 * Don't bother checking unless a grace period has
1627 		 * completed since we last checked and there are
1628 		 * callbacks not yet ready to invoke.
1629 		 */
1630 		if (rdp->completed != rnp->completed &&
1631 		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1632 			rcu_process_gp_end(rsp, rdp);
1633 
1634 		if (cpu_has_callbacks_ready_to_invoke(rdp))
1635 			cbs_ready = true;
1636 	}
1637 	return cbs_ready;
1638 }
1639 
1640 /*
1641  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1642  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1643  * caller to set the timeout based on whether or not there are non-lazy
1644  * callbacks.
1645  *
1646  * The caller must have disabled interrupts.
1647  */
1648 int rcu_needs_cpu(int cpu, unsigned long *dj)
1649 {
1650 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1651 
1652 	/* Snapshot to detect later posting of non-lazy callback. */
1653 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1654 
1655 	/* If no callbacks, RCU doesn't need the CPU. */
1656 	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
1657 		*dj = ULONG_MAX;
1658 		return 0;
1659 	}
1660 
1661 	/* Attempt to advance callbacks. */
1662 	if (rcu_try_advance_all_cbs()) {
1663 		/* Some ready to invoke, so initiate later invocation. */
1664 		invoke_rcu_core();
1665 		return 1;
1666 	}
1667 	rdtp->last_accelerate = jiffies;
1668 
1669 	/* Request timer delay depending on laziness, and round. */
1670 	if (!rdtp->all_lazy) {
1671 		*dj = round_up(rcu_idle_gp_delay + jiffies,
1672 			       rcu_idle_gp_delay) - jiffies;
1673 	} else {
1674 		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1675 	}
1676 	return 0;
1677 }
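
/*
 * Worked example of the rounding above (illustrative numbers): with
 * rcu_idle_gp_delay == 4 and jiffies == 1003, round_up(1003 + 4, 4)
 * yields 1008, so *dj == 5.  Rounding wakeup times to multiples of the
 * delay lets idle CPUs wake in batches instead of scattering timer
 * interrupts across the machine.
 */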
1678 
1679 /*
1680  * Prepare a CPU for idle from an RCU perspective.  The first major task
1681  * is to sense whether nohz mode has been enabled or disabled via sysfs.
1682  * The second major task is to check to see if a non-lazy callback has
1683  * arrived at a CPU that previously had only lazy callbacks.  The third
1684  * major task is to accelerate (that is, assign grace-period numbers to)
1685  * any recently arrived callbacks.
1686  *
1687  * The caller must have disabled interrupts.
1688  */
1689 static void rcu_prepare_for_idle(int cpu)
1690 {
1691 	struct rcu_data *rdp;
1692 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1693 	struct rcu_node *rnp;
1694 	struct rcu_state *rsp;
1695 	int tne;
1696 
1697 	/* Handle nohz enablement switches conservatively. */
1698 	tne = ACCESS_ONCE(tick_nohz_enabled);
1699 	if (tne != rdtp->tick_nohz_enabled_snap) {
1700 		if (rcu_cpu_has_callbacks(cpu, NULL))
1701 			invoke_rcu_core(); /* force nohz to see update. */
1702 		rdtp->tick_nohz_enabled_snap = tne;
1703 		return;
1704 	}
1705 	if (!tne)
1706 		return;
1707 
1708 	/* If this is a no-CBs CPU, it has no callbacks; just return. */
1709 	if (rcu_is_nocb_cpu(cpu))
1710 		return;
1711 
1712 	/*
1713 	 * If a non-lazy callback arrived at a CPU having only lazy
1714 	 * callbacks, invoke RCU core for the side-effect of recalculating
1715 	 * idle duration on re-entry to idle.
1716 	 */
1717 	if (rdtp->all_lazy &&
1718 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1719 		invoke_rcu_core();
1720 		return;
1721 	}
1722 
1723 	/*
1724 	 * If we have not yet accelerated this jiffy, accelerate all
1725 	 * callbacks on this CPU.
1726 	 */
1727 	if (rdtp->last_accelerate == jiffies)
1728 		return;
1729 	rdtp->last_accelerate = jiffies;
1730 	for_each_rcu_flavor(rsp) {
1731 		rdp = per_cpu_ptr(rsp->rda, cpu);
1732 		if (!*rdp->nxttail[RCU_DONE_TAIL])
1733 			continue;
1734 		rnp = rdp->mynode;
1735 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1736 		rcu_accelerate_cbs(rsp, rnp, rdp);
1737 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1738 	}
1739 }
1740 
1741 /*
1742  * Clean up for exit from idle.  Attempt to advance callbacks based on
1743  * any grace periods that elapsed while the CPU was idle, and if any
1744  * callbacks are now ready to invoke, initiate invocation.
1745  */
1746 static void rcu_cleanup_after_idle(int cpu)
1747 {
1748 	struct rcu_data *rdp;
1749 	struct rcu_state *rsp;
1750 
1751 	if (rcu_is_nocb_cpu(cpu))
1752 		return;
1753 	rcu_try_advance_all_cbs();
1754 	for_each_rcu_flavor(rsp) {
1755 		rdp = per_cpu_ptr(rsp->rda, cpu);
1756 		if (cpu_has_callbacks_ready_to_invoke(rdp))
1757 			invoke_rcu_core();
1758 	}
1759 }
1760 
1761 /*
1762  * Keep a running count of the number of non-lazy callbacks posted
1763  * on this CPU.  This running counter (which is never decremented) allows
1764  * rcu_prepare_for_idle() to detect when something out of the idle loop
1765  * posts a callback, even if an equal number of callbacks are invoked.
1766  * Of course, callbacks should only be posted from within a trace event
1767  * designed to be called from idle or from within RCU_NONIDLE().
1768  */
1769 static void rcu_idle_count_callbacks_posted(void)
1770 {
1771 	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1772 }
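
/*
 * A minimal sketch of the snapshot/compare idiom this counter enables
 * (the _snap field is the one rcu_needs_cpu() records at idle entry):
 *
 *	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;  // idle entry
 *	...
 *	if (rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap)
 *		// a non-lazy callback arrived; recompute the idle timeout
 *
 * Because the counter never decrements, an arrival cannot be masked by
 * an equal number of callback invocations in the meantime.
 */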
1773 
1774 /*
1775  * Data for flushing lazy RCU callbacks at OOM time.
1776  */
1777 static atomic_t oom_callback_count;
1778 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1779 
1780 /*
1781  * RCU OOM callback -- decrement the outstanding count and deliver the
1782  * wake-up if we are the last one.
1783  */
1784 static void rcu_oom_callback(struct rcu_head *rhp)
1785 {
1786 	if (atomic_dec_and_test(&oom_callback_count))
1787 		wake_up(&oom_callback_wq);
1788 }
1789 
1790 /*
1791  * Post an rcu_oom_notify callback on the current CPU if it has at
1792  * least one lazy callback.  This will unnecessarily post callbacks
1793  * to CPUs that already have a non-lazy callback at the end of their
1794  * callback list, but this is an infrequent operation, so accept some
1795  * extra overhead to keep things simple.
1796  */
1797 static void rcu_oom_notify_cpu(void *unused)
1798 {
1799 	struct rcu_state *rsp;
1800 	struct rcu_data *rdp;
1801 
1802 	for_each_rcu_flavor(rsp) {
1803 		rdp = __this_cpu_ptr(rsp->rda);
1804 		if (rdp->qlen_lazy != 0) {
1805 			atomic_inc(&oom_callback_count);
1806 			rsp->call(&rdp->oom_head, rcu_oom_callback);
1807 		}
1808 	}
1809 }
1810 
1811 /*
1812  * If low on memory, ensure that each CPU has a non-lazy callback.
1813  * This will wake up CPUs that have only lazy callbacks, in turn
1814  * ensuring that they free up the corresponding memory in a timely manner.
1815  * Because an uncertain amount of memory will be freed in some uncertain
1816  * timeframe, we do not claim to have freed anything.
1817  */
1818 static int rcu_oom_notify(struct notifier_block *self,
1819 			  unsigned long notused, void *nfreed)
1820 {
1821 	int cpu;
1822 
1823 	/* Wait for callbacks from earlier instance to complete. */
1824 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1825 
1826 	/*
1827 	 * Prevent premature wakeup: ensure that all increments happen
1828 	 * before there is a chance of the counter reaching zero.
1829 	 */
1830 	atomic_set(&oom_callback_count, 1);
1831 
1832 	get_online_cpus();
1833 	for_each_online_cpu(cpu) {
1834 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1835 		cond_resched();
1836 	}
1837 	put_online_cpus();
1838 
1839 	/* Unconditionally decrement: no need to wake ourselves up. */
1840 	atomic_dec(&oom_callback_count);
1841 
1842 	return NOTIFY_OK;
1843 }
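
/*
 * Sketch of the counting protocol above: the atomic_set(..., 1) plants a
 * bias that holds oom_callback_count above zero while callbacks are still
 * being posted, and the final atomic_dec() removes that bias.  The count
 * can therefore reach zero only once every rcu_oom_callback() has run:
 *
 *	count  = 1			// bias planted in rcu_oom_notify()
 *	count += 1 per lazy CPU		// rcu_oom_notify_cpu()
 *	count -= 1 per callback		// rcu_oom_callback(), may wake waiter
 *	count -= 1			// bias removed; next invocation may proceed
 */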
1844 
1845 static struct notifier_block rcu_oom_nb = {
1846 	.notifier_call = rcu_oom_notify
1847 };
1848 
1849 static int __init rcu_register_oom_notifier(void)
1850 {
1851 	register_oom_notifier(&rcu_oom_nb);
1852 	return 0;
1853 }
1854 early_initcall(rcu_register_oom_notifier);
1855 
1856 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1857 
1858 #ifdef CONFIG_RCU_CPU_STALL_INFO
1859 
1860 #ifdef CONFIG_RCU_FAST_NO_HZ
1861 
1862 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1863 {
1864 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1865 	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1866 
1867 	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1868 		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1869 		ulong2long(nlpd),
1870 		rdtp->all_lazy ? 'L' : '.',
1871 		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1872 }
1873 
1874 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1875 
1876 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1877 {
1878 	*cp = '\0';
1879 }
1880 
1881 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1882 
1883 /* Initiate the stall-info list. */
1884 static void print_cpu_stall_info_begin(void)
1885 {
1886 	printk(KERN_CONT "\n");
1887 }
1888 
1889 /*
1890  * Print out diagnostic information for the specified stalled CPU.
1891  *
1892  * If the specified CPU is aware of the current RCU grace period
1893  * (flavor specified by rsp), then print the number of scheduling
1894  * clock interrupts the CPU has taken during the time that it has
1895  * been aware.  Otherwise, print the number of RCU grace periods
1896  * that this CPU is ignorant of, for example, "1" if the CPU was
1897  * aware of the previous grace period.
1898  *
1899  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1900  */
1901 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1902 {
1903 	char fast_no_hz[72];
1904 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1905 	struct rcu_dynticks *rdtp = rdp->dynticks;
1906 	char *ticks_title;
1907 	unsigned long ticks_value;
1908 
1909 	if (rsp->gpnum == rdp->gpnum) {
1910 		ticks_title = "ticks this GP";
1911 		ticks_value = rdp->ticks_this_gp;
1912 	} else {
1913 		ticks_title = "GPs behind";
1914 		ticks_value = rsp->gpnum - rdp->gpnum;
1915 	}
1916 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1917 	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
1918 	       cpu, ticks_value, ticks_title,
1919 	       atomic_read(&rdtp->dynticks) & 0xfff,
1920 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1921 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1922 	       fast_no_hz);
1923 }
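
/*
 * Decoding the line printed above: "idle=" shows the low twelve bits of
 * the ->dynticks counter followed by ->dynticks_nesting and
 * ->dynticks_nmi_nesting, and "softirq=" compares the RCU_SOFTIRQ count
 * snapshotted when this CPU last noticed a new grace period (see
 * zero_cpu_stall_ticks() below) against the current count, so equal
 * values mean no RCU softirqs have run on that CPU since then.
 */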
1924 
1925 /* Terminate the stall-info list. */
1926 static void print_cpu_stall_info_end(void)
1927 {
1928 	printk(KERN_ERR "\t");
1929 }
1930 
1931 /* Zero ->ticks_this_gp for all flavors of RCU. */
1932 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1933 {
1934 	rdp->ticks_this_gp = 0;
1935 	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1936 }
1937 
1938 /* Increment ->ticks_this_gp for all flavors of RCU. */
1939 static void increment_cpu_stall_ticks(void)
1940 {
1941 	struct rcu_state *rsp;
1942 
1943 	for_each_rcu_flavor(rsp)
1944 		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
1945 }
1946 
1947 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1948 
1949 static void print_cpu_stall_info_begin(void)
1950 {
1951 	printk(KERN_CONT " {");
1952 }
1953 
1954 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1955 {
1956 	printk(KERN_CONT " %d", cpu);
1957 }
1958 
1959 static void print_cpu_stall_info_end(void)
1960 {
1961 	printk(KERN_CONT "} ");
1962 }
1963 
1964 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1965 {
1966 }
1967 
1968 static void increment_cpu_stall_ticks(void)
1969 {
1970 }
1971 
1972 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
1973 
1974 #ifdef CONFIG_RCU_NOCB_CPU
1975 
1976 /*
1977  * Offload callback processing from the boot-time-specified set of CPUs
1978  * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1979  * kthread created that pulls the callbacks from the corresponding CPU,
1980  * waits for a grace period to elapse, and invokes the callbacks.
1981  * The no-CBs CPUs do a wake_up() on their kthread when they insert
1982  * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1983  * has been specified, in which case each kthread actively polls its
1984  * CPU.  (Which isn't so great for energy efficiency, but which does
1985  * reduce RCU's overhead on that CPU.)
1986  *
1987  * This is intended to be used in conjunction with Frederic Weisbecker's
1988  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1989  * running CPU-bound user-mode computations.
1990  *
1991  * Offloading of callback processing could also in theory be used as
1992  * an energy-efficiency measure because CPUs with no RCU callbacks
1993  * queued are more aggressive about entering dyntick-idle mode.
1994  */
1995 
1996 
1997 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1998 static int __init rcu_nocb_setup(char *str)
1999 {
2000 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2001 	have_rcu_nocb_mask = true;
2002 	cpulist_parse(str, rcu_nocb_mask);
2003 	return 1;
2004 }
2005 __setup("rcu_nocbs=", rcu_nocb_setup);
2006 
2007 static int __init parse_rcu_nocb_poll(char *arg)
2008 {
2009 	rcu_nocb_poll = 1;
2010 	return 0;
2011 }
2012 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
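
/*
 * Example (values are illustrative): booting with "rcu_nocbs=1-3,7"
 * offloads callbacks for CPUs 1, 2, 3, and 7 to rcuo kthreads, and adding
 * "rcu_nocb_poll" makes those kthreads poll for work instead of sleeping
 * until awakened.
 */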
2013 
2014 /*
2015  * Do any no-CBs CPUs need another grace period?
2016  *
2017  * Interrupts must be disabled.  If the caller does not hold the root
2018  * rcu_node structure's ->lock, the results are advisory only.
2019  */
2020 static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2021 {
2022 	struct rcu_node *rnp = rcu_get_root(rsp);
2023 
2024 	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
2025 }
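
/*
 * Sketch of the parity indexing used above and in rcu_nocb_gp_cleanup():
 * need_future_gp[] and nocb_gp_wq[] are two-element arrays indexed by
 * grace-period number modulo two, so state for grace period c + 1 never
 * collides with state still draining for grace period c:
 *
 *	rnp->need_future_gp[(c + 1) & 0x1]	// requests for the next GP
 *	rnp->nocb_gp_wq[c & 0x1]		// waiters on GP number c
 */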
2026 
2027 /*
2028  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2029  * grace period.
2030  */
2031 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2032 {
2033 	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
2034 }
2035 
2036 /*
2037  * Set the root rcu_node structure's ->need_future_gp field
2038  * based on the sum of those of all rcu_node structures.  This does
2039  * double-count the root rcu_node structure's requests, but this
2040  * is necessary to handle the possibility of a rcu_nocb_kthread()
2041  * having awakened during the time that the rcu_node structures
2042  * were being updated for the end of the previous grace period.
2043  */
2044 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2045 {
2046 	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
2047 }
2048 
2049 static void rcu_init_one_nocb(struct rcu_node *rnp)
2050 {
2051 	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2052 	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
2053 }
2054 
2055 /* Is the specified CPU a no-CBs CPU? */
2056 bool rcu_is_nocb_cpu(int cpu)
2057 {
2058 	if (have_rcu_nocb_mask)
2059 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
2060 	return false;
2061 }
2062 
2063 /*
2064  * Enqueue the specified string of rcu_head structures onto the specified
2065  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
2066  * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
2067  * counts are supplied by rhcount and rhcount_lazy.
2068  *
2069  * If warranted, also wake up the kthread servicing this CPU's queues.
2070  */
2071 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2072 				    struct rcu_head *rhp,
2073 				    struct rcu_head **rhtp,
2074 				    int rhcount, int rhcount_lazy)
2075 {
2076 	int len;
2077 	struct rcu_head **old_rhpp;
2078 	struct task_struct *t;
2079 
2080 	/* Enqueue the callback on the nocb list and update counts. */
2081 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2082 	ACCESS_ONCE(*old_rhpp) = rhp;
2083 	atomic_long_add(rhcount, &rdp->nocb_q_count);
2084 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2085 
2086 	/* If we are not being polled and there is a kthread, awaken it ... */
2087 	t = ACCESS_ONCE(rdp->nocb_kthread);
2088 	if (rcu_nocb_poll || !t)
2089 		return;
2090 	len = atomic_long_read(&rdp->nocb_q_count);
2091 	if (old_rhpp == &rdp->nocb_head) {
2092 		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2093 		rdp->qlen_last_fqs_check = 0;
2094 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
2095 		wake_up_process(t); /* ... or if many callbacks queued. */
2096 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
2097 	}
2098 	return;
2099 }
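
/*
 * The enqueue above is lock-free: the xchg() atomically claims the old
 * tail pointer, so concurrent enqueuers each splice into a distinct link.
 * The idiom, reduced to its two steps:
 *
 *	old_rhpp = xchg(&rdp->nocb_tail, rhtp);	// claim the old tail slot
 *	*old_rhpp = rhp;			// publish the new element(s)
 *
 * A consumer that sees a NULL ->next while the counts promise more
 * entries (see rcu_nocb_kthread() below) has caught an enqueuer between
 * these two steps and simply waits for the publish.
 */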
2100 
2101 /*
2102  * This is a helper for __call_rcu(), which invokes this when the normal
2103  * callback queue is inoperable.  If this is not a no-CBs CPU, this
2104  * function returns failure back to __call_rcu(), which can complain
2105  * appropriately.
2106  *
2107  * Otherwise, this function queues the callback where the corresponding
2108  * "rcuo" kthread can find it.
2109  */
2110 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2111 			    bool lazy)
2112 {
2113 
2114 	if (!rcu_is_nocb_cpu(rdp->cpu))
2115 		return false;
2116 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
2117 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2118 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2119 					 (unsigned long)rhp->func,
2120 					 rdp->qlen_lazy, rdp->qlen);
2121 	else
2122 		trace_rcu_callback(rdp->rsp->name, rhp,
2123 				   rdp->qlen_lazy, rdp->qlen);
2124 	return true;
2125 }
2126 
2127 /*
2128  * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2129  * not a no-CBs CPU.
2130  */
2131 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2132 						     struct rcu_data *rdp)
2133 {
2134 	long ql = rsp->qlen;
2135 	long qll = rsp->qlen_lazy;
2136 
2137 	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2138 	if (!rcu_is_nocb_cpu(smp_processor_id()))
2139 		return false;
2140 	rsp->qlen = 0;
2141 	rsp->qlen_lazy = 0;
2142 
2143 	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
2144 	if (rsp->orphan_donelist != NULL) {
2145 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2146 					rsp->orphan_donetail, ql, qll);
2147 		ql = qll = 0;
2148 		rsp->orphan_donelist = NULL;
2149 		rsp->orphan_donetail = &rsp->orphan_donelist;
2150 	}
2151 	if (rsp->orphan_nxtlist != NULL) {
2152 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2153 					rsp->orphan_nxttail, ql, qll);
2154 		ql = qll = 0;
2155 		rsp->orphan_nxtlist = NULL;
2156 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2157 	}
2158 	return true;
2159 }
2160 
2161 /*
2162  * If necessary, kick off a new grace period, and either way wait
2163  * for a subsequent grace period to complete.
2164  */
2165 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2166 {
2167 	unsigned long c;
2168 	bool d;
2169 	unsigned long flags;
2170 	struct rcu_node *rnp = rdp->mynode;
2171 
2172 	raw_spin_lock_irqsave(&rnp->lock, flags);
2173 	c = rcu_start_future_gp(rnp, rdp);
2174 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
2175 
2176 	/*
2177 	 * Wait for the grace period.  Do so interruptibly to avoid messing
2178 	 * up the load average.
2179 	 */
2180 	trace_rcu_future_gp(rnp, rdp, c, "StartWait");
2181 	for (;;) {
2182 		wait_event_interruptible(
2183 			rnp->nocb_gp_wq[c & 0x1],
2184 			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2185 		if (likely(d))
2186 			break;
2187 		flush_signals(current);
2188 		trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
2189 	}
2190 	trace_rcu_future_gp(rnp, rdp, c, "EndWait");
2191 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
2192 }
2193 
2194 /*
2195  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2196  * callbacks queued by the corresponding no-CBs CPU.
2197  */
2198 static int rcu_nocb_kthread(void *arg)
2199 {
2200 	int c, cl;
2201 	struct rcu_head *list;
2202 	struct rcu_head *next;
2203 	struct rcu_head **tail;
2204 	struct rcu_data *rdp = arg;
2205 
2206 	/* Each pass through this loop invokes one batch of callbacks. */
2207 	for (;;) {
2208 		/* If not polling, wait for next batch of callbacks. */
2209 		if (!rcu_nocb_poll)
2210 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
2211 		list = ACCESS_ONCE(rdp->nocb_head);
2212 		if (!list) {
2213 			schedule_timeout_interruptible(1);
2214 			flush_signals(current);
2215 			continue;
2216 		}
2217 
2218 		/*
2219 		 * Extract queued callbacks, update counts, and wait
2220 		 * for a grace period to elapse.
2221 		 */
2222 		ACCESS_ONCE(rdp->nocb_head) = NULL;
2223 		tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2224 		c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2225 		cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2226 		ACCESS_ONCE(rdp->nocb_p_count) += c;
2227 		ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
2228 		rcu_nocb_wait_gp(rdp);
2229 
2230 		/* Each pass through the following loop invokes a callback. */
2231 		trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2232 		c = cl = 0;
2233 		while (list) {
2234 			next = list->next;
2235 			/* Wait for enqueuing to complete, if needed. */
2236 			while (next == NULL && &list->next != tail) {
2237 				schedule_timeout_interruptible(1);
2238 				next = list->next;
2239 			}
2240 			debug_rcu_head_unqueue(list);
2241 			local_bh_disable();
2242 			if (__rcu_reclaim(rdp->rsp->name, list))
2243 				cl++;
2244 			c++;
2245 			local_bh_enable();
2246 			list = next;
2247 		}
2248 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2249 		ACCESS_ONCE(rdp->nocb_p_count) -= c;
2250 		ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
2251 		rdp->n_nocbs_invoked += c;
2252 	}
2253 	return 0;
2254 }
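
/*
 * Note on the drain above: xchg()ing ->nocb_tail back to &->nocb_head
 * atomically resets the queue to empty as seen by enqueuers, pairing with
 * the xchg() in __call_rcu_nocb_enqueue(); the inner "next == NULL" loop
 * then rides out any enqueuer caught between claiming the old tail and
 * publishing its element.
 */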
2255 
2256 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2257 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2258 {
2259 	rdp->nocb_tail = &rdp->nocb_head;
2260 	init_waitqueue_head(&rdp->nocb_wq);
2261 }
2262 
2263 /* Create a kthread for each RCU flavor for each no-CBs CPU. */
2264 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2265 {
2266 	int cpu;
2267 	struct rcu_data *rdp;
2268 	struct task_struct *t;
2269 
2270 	if (rcu_nocb_mask == NULL)
2271 		return;
2272 	for_each_cpu(cpu, rcu_nocb_mask) {
2273 		rdp = per_cpu_ptr(rsp->rda, cpu);
2274 		t = kthread_run(rcu_nocb_kthread, rdp,
2275 				"rcuo%c/%d", rsp->abbr, cpu);
2276 		BUG_ON(IS_ERR(t));
2277 		ACCESS_ONCE(rdp->nocb_kthread) = t;
2278 	}
2279 }
2280 
2281 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2282 static bool init_nocb_callback_list(struct rcu_data *rdp)
2283 {
2284 	if (rcu_nocb_mask == NULL ||
2285 	    !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
2286 		return false;
2287 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2288 	return true;
2289 }
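
/*
 * The NULLed ->nxttail[RCU_NEXT_TAIL] above is the cue that __call_rcu()
 * checks to divert callbacks away from the normal per-CPU segmented list
 * and into __call_rcu_nocb() instead.
 */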
2290 
2291 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2292 
2293 static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2294 {
2295 	return 0;
2296 }
2297 
2298 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2299 {
2300 }
2301 
2302 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2303 {
2304 }
2305 
2306 static void rcu_init_one_nocb(struct rcu_node *rnp)
2307 {
2308 }
2309 
2310 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2311 			    bool lazy)
2312 {
2313 	return false;
2314 }
2315 
2316 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2317 						     struct rcu_data *rdp)
2318 {
2319 	return false;
2320 }
2321 
2322 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2323 {
2324 }
2325 
2326 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2327 {
2328 }
2329 
2330 static bool init_nocb_callback_list(struct rcu_data *rdp)
2331 {
2332 	return false;
2333 }
2334 
2335 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2336 
2337 /*
2338  * An adaptive-ticks CPU can potentially execute in kernel mode for an
2339  * arbitrarily long period of time with the scheduling-clock tick turned
2340  * off.  RCU will be paying attention to this CPU because it is in the
2341  * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2342  * machine because the scheduling-clock tick has been disabled.  Therefore,
2343  * if an adaptive-ticks CPU is failing to respond to the current grace
2344  * period and has not been idle from an RCU perspective, kick it.
2345  */
2346 static void rcu_kick_nohz_cpu(int cpu)
2347 {
2348 #ifdef CONFIG_NO_HZ_FULL
2349 	if (tick_nohz_full_cpu(cpu))
2350 		smp_send_reschedule(cpu);
2351 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2352 }
2353