1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/percpu.h>
37 #include <linux/notifier.h>
38 #include <linux/cpu.h>
39 #include <linux/mutex.h>
40 #include <linux/time.h>
41 #include <linux/kernel_stat.h>
42 #include <linux/wait.h>
43 #include <linux/kthread.h>
44 #include <uapi/linux/sched/types.h>
45 #include <linux/prefetch.h>
46 #include <linux/delay.h>
47 #include <linux/random.h>
48 #include <linux/trace_events.h>
49 #include <linux/suspend.h>
50 #include <linux/ftrace.h>
51 #include <linux/tick.h>
52 #include <linux/sysrq.h>
53 #include <linux/kprobes.h>
54 #include <linux/gfp.h>
55 #include <linux/oom.h>
56 #include <linux/smpboot.h>
57 #include <linux/jiffies.h>
58 #include <linux/slab.h>
59 #include <linux/sched/isolation.h>
60 #include <linux/sched/clock.h>
61 #include <linux/vmalloc.h>
62 #include <linux/mm.h>
63 #include <linux/kasan.h>
64 #include "../time/tick-internal.h"
65 
66 #include "tree.h"
67 #include "rcu.h"
68 
69 #ifdef MODULE_PARAM_PREFIX
70 #undef MODULE_PARAM_PREFIX
71 #endif
72 #define MODULE_PARAM_PREFIX "rcutree."
73 
74 /* Data structures. */
75 
76 /*
77  * Steal a bit from the bottom of ->dynticks for idle entry/exit
78  * control.  Initially this is for TLB flushing.
79  */
80 #define RCU_DYNTICK_CTRL_MASK 0x1
81 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
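/*
 * Worked example (editor's sketch): with RCU_DYNTICK_CTRL_MASK == 0x1
 * and RCU_DYNTICK_CTRL_CTR == 0x2, the counter advances in steps of 0x2,
 * leaving bit 0 free for the special-action flag.  A ->dynticks value of
 * 0x6 has the CTR bit set (RCU is watching), 0x4 has it clear (extended
 * quiescent state), and 0x5 is an extended quiescent state with a special
 * action (such as a TLB flush) pending for the next EQS exit.
 */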
82 
83 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
84 	.dynticks_nesting = 1,
85 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
86 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
87 };
88 static struct rcu_state rcu_state = {
89 	.level = { &rcu_state.node[0] },
90 	.gp_state = RCU_GP_IDLE,
91 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
92 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
93 	.name = RCU_NAME,
94 	.abbr = RCU_ABBR,
95 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
96 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
97 	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
98 };
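/*
 * Editor's note: the ->gp_seq initializer above starts the counter 300
 * grace periods short of wrap, presumably so that sequence-number wrap
 * is exercised shortly after boot rather than only after long uptimes.
 */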
99 
100 /* Dump rcu_node combining tree at boot to verify correct setup. */
101 static bool dump_tree;
102 module_param(dump_tree, bool, 0444);
103 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
104 static bool use_softirq = true;
105 module_param(use_softirq, bool, 0444);
106 /* Control rcu_node-tree auto-balancing at boot time. */
107 static bool rcu_fanout_exact;
108 module_param(rcu_fanout_exact, bool, 0444);
109 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
110 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
111 module_param(rcu_fanout_leaf, int, 0444);
112 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
113 /* Number of rcu_nodes at specified level. */
114 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
115 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
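/*
 * Usage note (editor's addition): because of MODULE_PARAM_PREFIX above,
 * these parameters live in the "rcutree." namespace.  An illustrative
 * kernel command line (values are examples only):
 *
 *	rcutree.dump_tree=1 rcutree.rcu_fanout_leaf=16 rcutree.use_softirq=0
 */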
116 
117 /*
118  * The rcu_scheduler_active variable is initialized to the value
119  * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
120  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
121  * RCU can assume that there is but one task, allowing RCU to (for example)
122  * optimize synchronize_rcu() to a simple barrier().  When this variable
123  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
124  * to detect real grace periods.  This variable is also used to suppress
125  * boot-time false positives from lockdep-RCU error checking.  Finally, it
126  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
127  * is fully initialized, including all of its kthreads having been spawned.
128  */
129 int rcu_scheduler_active __read_mostly;
130 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
131 
132 /*
133  * The rcu_scheduler_fully_active variable transitions from zero to one
134  * during the early_initcall() processing, which is after the scheduler
135  * is capable of creating new tasks.  So RCU processing (for example,
136  * creating tasks for RCU priority boosting) must be delayed until after
137  * rcu_scheduler_fully_active transitions from zero to one.  We also
138  * currently delay invocation of any RCU callbacks until after this point.
139  *
140  * It might later prove better for people registering RCU callbacks during
141  * early boot to take responsibility for these callbacks, but one step at
142  * a time.
143  */
144 static int rcu_scheduler_fully_active __read_mostly;
145 
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
147 			      unsigned long gps, unsigned long flags);
148 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
150 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
151 static void invoke_rcu_core(void);
152 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void sync_sched_exp_online_cleanup(int cpu);
154 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
155 
156 /* rcuc/rcub kthread realtime priority */
157 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
158 module_param(kthread_prio, int, 0444);
159 
160 /* Delay in jiffies for grace-period initialization delays, debug only. */
161 
162 static int gp_preinit_delay;
163 module_param(gp_preinit_delay, int, 0444);
164 static int gp_init_delay;
165 module_param(gp_init_delay, int, 0444);
166 static int gp_cleanup_delay;
167 module_param(gp_cleanup_delay, int, 0444);
168 
169 // Add delay to rcu_read_unlock() for strict grace periods.
170 static int rcu_unlock_delay;
171 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
172 module_param(rcu_unlock_delay, int, 0444);
173 #endif
174 
175 /*
176  * This rcu parameter is runtime-read-only. It reflects
177  * a minimum allowed number of objects which can be cached
178  * per-CPU. Object size is equal to one page. This value
179  * can be changed at boot time.
180  */
181 static int rcu_min_cached_objs = 5;
182 module_param(rcu_min_cached_objs, int, 0444);
183 
184 /* Retrieve RCU kthreads priority for rcutorture */
185 int rcu_get_gp_kthreads_prio(void)
186 {
187 	return kthread_prio;
188 }
189 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
190 
191 /*
192  * Number of grace periods between delays, normalized by the duration of
193  * the delay.  The longer the delay, the more the grace periods between
194  * each delay.  The reason for this normalization is that it means that,
195  * for non-zero delays, the overall slowdown of grace periods is constant
196  * regardless of the duration of the delay.  This arrangement balances
197  * the need for long delays to increase some race probabilities with the
198  * need for fast grace periods to increase other race probabilities.
199  */
200 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
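/*
 * Worked example of the normalization (editor's addition): with delay d
 * jiffies and delays spaced PER_RCU_NODE_PERIOD * d grace periods apart,
 * the added cost is d / (3 * d) = 1/3 jiffy per grace period whether d
 * is 1 or 1000, so longer debug delays do not further slow the overall
 * grace-period rate.
 */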
201 
202 /*
203  * Compute the mask of online CPUs for the specified rcu_node structure.
204  * This will not be stable unless the rcu_node structure's ->lock is
205  * held, but the bit corresponding to the current CPU will be stable
206  * in most contexts.
207  */
208 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
209 {
210 	return READ_ONCE(rnp->qsmaskinitnext);
211 }
212 
213 /*
214  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
215  * permit this function to be invoked without holding the root rcu_node
216  * structure's ->lock, but of course results can be subject to change.
217  */
218 static int rcu_gp_in_progress(void)
219 {
220 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
221 }
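/*
 * Sketch of the ->gp_seq encoding relied on above (editor's addition,
 * matching the rcu_seq_*() helpers in kernel/rcu/rcu.h): the low
 * RCU_SEQ_CTR_SHIFT bits hold the grace-period phase, the rest count
 * grace periods, so a nonzero phase field means a grace period is in
 * progress.  The function name below is hypothetical.
 */
#if 0
static bool example_gp_in_progress(unsigned long gp_seq)
{
	/* Equivalent to rcu_seq_state(gp_seq) != 0. */
	return (gp_seq & ((1UL << RCU_SEQ_CTR_SHIFT) - 1)) != 0;
}
#endif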
222 
223 /*
224  * Return the number of callbacks queued on the specified CPU.
225  * Handles both the nocbs and normal cases.
226  */
227 static long rcu_get_n_cbs_cpu(int cpu)
228 {
229 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
230 
231 	if (rcu_segcblist_is_enabled(&rdp->cblist))
232 		return rcu_segcblist_n_cbs(&rdp->cblist);
233 	return 0;
234 }
235 
236 void rcu_softirq_qs(void)
237 {
238 	rcu_qs();
239 	rcu_preempt_deferred_qs(current);
240 }
241 
242 /*
243  * Record entry into an extended quiescent state.  This is only to be
244  * called when not already in an extended quiescent state, that is,
245  * RCU is watching prior to the call to this function and is no longer
246  * watching upon return.
247  */
248 static noinstr void rcu_dynticks_eqs_enter(void)
249 {
250 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
251 	int seq;
252 
253 	/*
254 	 * CPUs seeing atomic_add_return() must see prior RCU read-side
255 	 * critical sections, and we also must force ordering with the
256 	 * next idle sojourn.
257 	 */
258 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
259 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
260 	// RCU is no longer watching.  Better be in extended quiescent state!
261 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
262 		     (seq & RCU_DYNTICK_CTRL_CTR));
263 	/* Better not have special action (TLB flush) pending! */
264 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
265 		     (seq & RCU_DYNTICK_CTRL_MASK));
266 }
267 
268 /*
269  * Record exit from an extended quiescent state.  This is only to be
270  * called from an extended quiescent state, that is, RCU is not watching
271  * prior to the call to this function and is watching upon return.
272  */
273 static noinstr void rcu_dynticks_eqs_exit(void)
274 {
275 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
276 	int seq;
277 
278 	/*
279 	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
280 	 * and we also must force ordering with the next RCU read-side
281 	 * critical section.
282 	 */
283 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
284 	// RCU is now watching.  Better not be in an extended quiescent state!
285 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
286 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
287 		     !(seq & RCU_DYNTICK_CTRL_CTR));
288 	if (seq & RCU_DYNTICK_CTRL_MASK) {
289 		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
290 		smp_mb__after_atomic(); /* _exit after clearing mask. */
291 	}
292 }
293 
294 /*
295  * Reset the current CPU's ->dynticks counter to indicate that the
296  * newly onlined CPU is no longer in an extended quiescent state.
297  * This will either leave the counter unchanged, or increment it
298  * to the next non-quiescent value.
299  *
300  * The non-atomic test/increment sequence works because the upper bits
301  * of the ->dynticks counter are manipulated only by the corresponding CPU,
302  * or when the corresponding CPU is offline.
303  */
304 static void rcu_dynticks_eqs_online(void)
305 {
306 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
307 
308 	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
309 		return;
310 	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
311 }
312 
313 /*
314  * Is the current CPU in an extended quiescent state?
315  *
316  * No ordering, as we are sampling CPU-local information.
317  */
318 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
319 {
320 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
321 
322 	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
323 }
324 
325 /*
326  * Snapshot the ->dynticks counter with full ordering so as to allow
327  * stable comparison of this counter with past and future snapshots.
328  */
329 static int rcu_dynticks_snap(struct rcu_data *rdp)
330 {
331 	int snap = atomic_add_return(0, &rdp->dynticks);
332 
333 	return snap & ~RCU_DYNTICK_CTRL_MASK;
334 }
335 
336 /*
337  * Return true if the snapshot returned from rcu_dynticks_snap()
338  * indicates that RCU is in an extended quiescent state.
339  */
340 static bool rcu_dynticks_in_eqs(int snap)
341 {
342 	return !(snap & RCU_DYNTICK_CTRL_CTR);
343 }
344 
345 /*
346  * Return true if the CPU corresponding to the specified rcu_data
347  * structure has spent some time in an extended quiescent state since
348  * rcu_dynticks_snap() returned the specified snapshot.
349  */
350 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
351 {
352 	return snap != rcu_dynticks_snap(rdp);
353 }
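/*
 * Minimal sketch (editor's addition, hypothetical function name) of how
 * the helpers above combine when probing a remote CPU for a quiescent
 * state: sample once with full ordering, then re-check later.
 */
#if 0
static bool example_saw_eqs(struct rcu_data *rdp)
{
	int snap = rcu_dynticks_snap(rdp);	/* Fully ordered sample. */

	if (rcu_dynticks_in_eqs(snap))
		return true;	/* The CPU is idle right now. */
	/* ... allow some time to pass ... */
	return rcu_dynticks_in_eqs_since(rdp, snap); /* Idle at some point? */
}
#endif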
354 
355 /*
356  * Return true if the referenced integer is zero while the specified
357  * CPU remains within a single extended quiescent state.
358  */
359 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
360 {
361 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
362 	int snap;
363 
364 	// If not quiescent, force back to earlier extended quiescent state.
365 	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
366 					       RCU_DYNTICK_CTRL_CTR);
367 
368 	smp_rmb(); // Order ->dynticks and *vp reads.
369 	if (READ_ONCE(*vp))
370 		return false;  // Non-zero, so report failure.
371 	smp_rmb(); // Order *vp read and ->dynticks re-read.
372 
373 	// If still in the same extended quiescent state, we are good!
374 	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
375 }
376 
377 /*
378  * Set the special (bottom) bit of the specified CPU so that it
379  * will take special action (such as flushing its TLB) on the
380  * next exit from an extended quiescent state.  Returns true if
381  * the bit was successfully set, or false if the CPU was not in
382  * an extended quiescent state.
383  */
384 bool rcu_eqs_special_set(int cpu)
385 {
386 	int old;
387 	int new;
388 	int new_old;
389 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
390 
391 	new_old = atomic_read(&rdp->dynticks);
392 	do {
393 		old = new_old;
394 		if (old & RCU_DYNTICK_CTRL_CTR)
395 			return false;
396 		new = old | RCU_DYNTICK_CTRL_MASK;
397 		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
398 	} while (new_old != old);
399 	return true;
400 }
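/*
 * Hypothetical caller (editor's sketch): defer work such as a TLB flush
 * for an idle CPU, falling back to an IPI when the CPU is not in an
 * extended quiescent state.  do_flush_ipi() is a made-up handler name.
 */
#if 0
static void example_flush_remote_tlb(int cpu)
{
	if (rcu_eqs_special_set(cpu))
		return;	/* Idle: the flush runs at the next EQS exit. */
	smp_call_function_single(cpu, do_flush_ipi, NULL, 1);
}
#endif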
401 
402 /*
403  * Let the RCU core know that this CPU has gone through the scheduler,
404  * which is a quiescent state.  This is called when the need for a
405  * quiescent state is urgent, so we burn an atomic operation and full
406  * memory barriers to let the RCU core know about it, regardless of what
407  * this CPU might (or might not) do in the near future.
408  *
409  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
410  *
411  * The caller must have disabled interrupts and must not be idle.
412  */
413 notrace void rcu_momentary_dyntick_idle(void)
414 {
415 	int special;
416 
417 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
418 	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
419 				    &this_cpu_ptr(&rcu_data)->dynticks);
420 	/* It is illegal to call this from idle state. */
421 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
422 	rcu_preempt_deferred_qs(current);
423 }
424 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
425 
426 /**
427  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
428  *
429  * If the current CPU is idle and running at a first-level (not nested)
430  * interrupt, or directly from idle, return true.
431  *
432  * The caller must have at least disabled IRQs.
433  */
434 static int rcu_is_cpu_rrupt_from_idle(void)
435 {
436 	long nesting;
437 
438 	/*
439 	 * Usually called from the tick; but also used from smp_call_function()
440 	 * for expedited grace periods. This latter can result in running from
441 	 * the idle task, instead of an actual IPI.
442 	 */
443 	lockdep_assert_irqs_disabled();
444 
445 	/* Check for counter underflows */
446 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
447 			 "RCU dynticks_nesting counter underflow!");
448 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
449 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
450 
451 	/* Are we at first interrupt nesting level? */
452 	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
453 	if (nesting > 1)
454 		return false;
455 
456 	/*
457 	 * If we're not in an interrupt, we must be in the idle task!
458 	 */
459 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
460 
461 	/* Does CPU appear to be idle from an RCU standpoint? */
462 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
463 }
464 
465 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
466 				// Maximum callbacks per rcu_do_batch ...
467 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
468 static long blimit = DEFAULT_RCU_BLIMIT;
469 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
470 static long qhimark = DEFAULT_RCU_QHIMARK;
471 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
472 static long qlowmark = DEFAULT_RCU_QLOMARK;
473 #define DEFAULT_RCU_QOVLD_MULT 2
474 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
475 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
476 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
477 
478 module_param(blimit, long, 0444);
479 module_param(qhimark, long, 0444);
480 module_param(qlowmark, long, 0444);
481 module_param(qovld, long, 0444);
482 
483 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
484 static ulong jiffies_till_next_fqs = ULONG_MAX;
485 static bool rcu_kick_kthreads;
486 static int rcu_divisor = 7;
487 module_param(rcu_divisor, int, 0644);
488 
489 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
490 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
491 module_param(rcu_resched_ns, long, 0644);
492 
493 /*
494  * How long the grace period must be before we start recruiting
495  * quiescent-state help from rcu_note_context_switch().
496  */
497 static ulong jiffies_till_sched_qs = ULONG_MAX;
498 module_param(jiffies_till_sched_qs, ulong, 0444);
499 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
500 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
501 
502 /*
503  * Make sure that we give the grace-period kthread time to detect any
504  * idle CPUs before taking active measures to force quiescent states.
505  * However, don't go below 100 milliseconds, adjusted upwards for really
506  * large systems.
507  */
508 static void adjust_jiffies_till_sched_qs(void)
509 {
510 	unsigned long j;
511 
512 	/* If jiffies_till_sched_qs was specified, respect the request. */
513 	if (jiffies_till_sched_qs != ULONG_MAX) {
514 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
515 		return;
516 	}
517 	/* Otherwise, set to third fqs scan, but bound below on large system. */
518 	j = READ_ONCE(jiffies_till_first_fqs) +
519 		      2 * READ_ONCE(jiffies_till_next_fqs);
520 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
521 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
522 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
523 	WRITE_ONCE(jiffies_to_sched_qs, j);
524 }
525 
526 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
527 {
528 	ulong j;
529 	int ret = kstrtoul(val, 0, &j);
530 
531 	if (!ret) {
532 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
533 		adjust_jiffies_till_sched_qs();
534 	}
535 	return ret;
536 }
537 
538 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
539 {
540 	ulong j;
541 	int ret = kstrtoul(val, 0, &j);
542 
543 	if (!ret) {
544 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
545 		adjust_jiffies_till_sched_qs();
546 	}
547 	return ret;
548 }
549 
550 static struct kernel_param_ops first_fqs_jiffies_ops = {
551 	.set = param_set_first_fqs_jiffies,
552 	.get = param_get_ulong,
553 };
554 
555 static struct kernel_param_ops next_fqs_jiffies_ops = {
556 	.set = param_set_next_fqs_jiffies,
557 	.get = param_get_ulong,
558 };
559 
560 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
561 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
562 module_param(rcu_kick_kthreads, bool, 0644);
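/*
 * Usage note (editor's addition): the 0644 permissions make these
 * tunable at run time as well, e.g. (illustrative value):
 *
 *	echo 50 > /sys/module/rcutree/parameters/jiffies_till_first_fqs
 */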
563 
564 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
565 static int rcu_pending(int user);
566 
567 /*
568  * Return the number of RCU GPs completed thus far for debug & stats.
569  */
570 unsigned long rcu_get_gp_seq(void)
571 {
572 	return READ_ONCE(rcu_state.gp_seq);
573 }
574 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
575 
576 /*
577  * Return the number of RCU expedited batches completed thus far for
578  * debug & stats.  Odd numbers mean that a batch is in progress, even
579  * numbers mean idle.  The value returned will thus be roughly double
580  * the cumulative batches since boot.
581  */
582 unsigned long rcu_exp_batches_completed(void)
583 {
584 	return rcu_state.expedited_sequence;
585 }
586 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
587 
588 /*
589  * Return the root node of the rcu_state structure.
590  */
591 static struct rcu_node *rcu_get_root(void)
592 {
593 	return &rcu_state.node[0];
594 }
595 
596 /*
597  * Send along grace-period-related data for rcutorture diagnostics.
598  */
599 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
600 			    unsigned long *gp_seq)
601 {
602 	switch (test_type) {
603 	case RCU_FLAVOR:
604 		*flags = READ_ONCE(rcu_state.gp_flags);
605 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
606 		break;
607 	default:
608 		break;
609 	}
610 }
611 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
612 
613 /*
614  * Enter an RCU extended quiescent state, which can be either the
615  * idle loop or adaptive-tickless usermode execution.
616  *
617  * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
618  * the possibility of usermode upcalls having messed up our count
619  * of interrupt nesting level during the prior busy period.
620  */
621 static noinstr void rcu_eqs_enter(bool user)
622 {
623 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
624 
625 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
626 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
627 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
628 		     rdp->dynticks_nesting == 0);
629 	if (rdp->dynticks_nesting != 1) {
630 		// RCU will still be watching, so just do accounting and leave.
631 		rdp->dynticks_nesting--;
632 		return;
633 	}
634 
635 	lockdep_assert_irqs_disabled();
636 	instrumentation_begin();
637 	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
638 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
639 	rdp = this_cpu_ptr(&rcu_data);
640 	rcu_prepare_for_idle();
641 	rcu_preempt_deferred_qs(current);
642 
643 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
644 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
645 
646 	instrumentation_end();
647 	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
648 	// RCU is watching here ...
649 	rcu_dynticks_eqs_enter();
650 	// ... but is no longer watching here.
651 	rcu_dynticks_task_enter();
652 }
653 
654 /**
655  * rcu_idle_enter - inform RCU that current CPU is entering idle
656  *
657  * Enter idle mode, in other words, -leave- the mode in which RCU
658  * read-side critical sections can occur.  (Though RCU read-side
659  * critical sections can occur in irq handlers in idle, a possibility
660  * handled by irq_enter() and irq_exit().)
661  *
662  * If you add or remove a call to rcu_idle_enter(), be sure to test with
663  * CONFIG_RCU_EQS_DEBUG=y.
664  */
665 void rcu_idle_enter(void)
666 {
667 	lockdep_assert_irqs_disabled();
668 	rcu_eqs_enter(false);
669 }
670 EXPORT_SYMBOL_GPL(rcu_idle_enter);
671 
672 #ifdef CONFIG_NO_HZ_FULL
673 /**
674  * rcu_user_enter - inform RCU that we are resuming userspace.
675  *
676  * Enter RCU idle mode right before resuming userspace.  No use of RCU
677  * is permitted between this call and rcu_user_exit(). This way the
678  * CPU doesn't need to maintain the tick for RCU maintenance purposes
679  * when the CPU runs in userspace.
680  *
681  * If you add or remove a call to rcu_user_enter(), be sure to test with
682  * CONFIG_RCU_EQS_DEBUG=y.
683  */
684 noinstr void rcu_user_enter(void)
685 {
686 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
687 
688 	lockdep_assert_irqs_disabled();
689 
690 	instrumentation_begin();
691 	do_nocb_deferred_wakeup(rdp);
692 	instrumentation_end();
693 
694 	rcu_eqs_enter(true);
695 }
696 #endif /* CONFIG_NO_HZ_FULL */
697 
698 /**
699  * rcu_nmi_exit - inform RCU of exit from NMI context
700  *
701  * If we are returning from the outermost NMI handler that interrupted an
702  * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
703  * to let the RCU grace-period handling know that the CPU is back to
704  * being RCU-idle.
705  *
706  * If you add or remove a call to rcu_nmi_exit(), be sure to test
707  * with CONFIG_RCU_EQS_DEBUG=y.
708  */
709 noinstr void rcu_nmi_exit(void)
710 {
711 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
712 
713 	instrumentation_begin();
714 	/*
715 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
716 	 * (We are exiting an NMI handler, so RCU better be paying attention
717 	 * to us!)
718 	 */
719 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
720 	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
721 
722 	/*
723 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
724 	 * leave it in non-RCU-idle state.
725 	 */
726 	if (rdp->dynticks_nmi_nesting != 1) {
727 		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
728 				  atomic_read(&rdp->dynticks));
729 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
730 			   rdp->dynticks_nmi_nesting - 2);
731 		instrumentation_end();
732 		return;
733 	}
734 
735 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
736 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
737 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
738 
739 	if (!in_nmi())
740 		rcu_prepare_for_idle();
741 
742 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
743 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
744 	instrumentation_end();
745 
746 	// RCU is watching here ...
747 	rcu_dynticks_eqs_enter();
748 	// ... but is no longer watching here.
749 
750 	if (!in_nmi())
751 		rcu_dynticks_task_enter();
752 }
753 
754 /**
755  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
756  *
757  * Exit from an interrupt handler, which might possibly result in entering
758  * idle mode, in other words, leaving the mode in which read-side critical
759  * sections can occur.  The caller must have disabled interrupts.
760  *
761  * This code assumes that the idle loop never does anything that might
762  * result in unbalanced calls to irq_enter() and irq_exit().  If your
763  * architecture's idle loop violates this assumption, RCU will give you what
764  * you deserve, good and hard.  But very infrequently and irreproducibly.
765  *
766  * Use things like work queues to work around this limitation.
767  *
768  * You have been warned.
769  *
770  * If you add or remove a call to rcu_irq_exit(), be sure to test with
771  * CONFIG_RCU_EQS_DEBUG=y.
772  */
773 void noinstr rcu_irq_exit(void)
774 {
775 	lockdep_assert_irqs_disabled();
776 	rcu_nmi_exit();
777 }
778 
779 /**
780  * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
781  *			  towards in-kernel preemption
782  *
783  * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
784  * from RCU point of view. Invoked from return from interrupt before kernel
785  * preemption.
786  */
787 void rcu_irq_exit_preempt(void)
788 {
789 	lockdep_assert_irqs_disabled();
790 	rcu_nmi_exit();
791 
792 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
793 			 "RCU dynticks_nesting counter underflow/zero!");
794 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
795 			 DYNTICK_IRQ_NONIDLE,
796 			 "Bad RCU  dynticks_nmi_nesting counter\n");
797 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
798 			 "RCU in extended quiescent state!");
799 }
800 
801 #ifdef CONFIG_PROVE_RCU
802 /**
803  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
804  */
805 void rcu_irq_exit_check_preempt(void)
806 {
807 	lockdep_assert_irqs_disabled();
808 
809 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
810 			 "RCU dynticks_nesting counter underflow/zero!");
811 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
812 			 DYNTICK_IRQ_NONIDLE,
813 			 "Bad RCU  dynticks_nmi_nesting counter\n");
814 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
815 			 "RCU in extended quiescent state!");
816 }
817 #endif /* #ifdef CONFIG_PROVE_RCU */
818 
819 /*
820  * Wrapper for rcu_irq_exit() where interrupts are enabled.
821  *
822  * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
823  * with CONFIG_RCU_EQS_DEBUG=y.
824  */
825 void rcu_irq_exit_irqson(void)
826 {
827 	unsigned long flags;
828 
829 	local_irq_save(flags);
830 	rcu_irq_exit();
831 	local_irq_restore(flags);
832 }
833 
834 /*
835  * Exit an RCU extended quiescent state, which can be either the
836  * idle loop or adaptive-tickless usermode execution.
837  *
838  * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
839  * allow for the possibility of usermode upcalls messing up our count of
840  * interrupt nesting level during the busy period that is just now starting.
841  */
842 static void noinstr rcu_eqs_exit(bool user)
843 {
844 	struct rcu_data *rdp;
845 	long oldval;
846 
847 	lockdep_assert_irqs_disabled();
848 	rdp = this_cpu_ptr(&rcu_data);
849 	oldval = rdp->dynticks_nesting;
850 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
851 	if (oldval) {
852 		// RCU was already watching, so just do accounting and leave.
853 		rdp->dynticks_nesting++;
854 		return;
855 	}
856 	rcu_dynticks_task_exit();
857 	// RCU is not watching here ...
858 	rcu_dynticks_eqs_exit();
859 	// ... but is watching here.
860 	instrumentation_begin();
861 
862 	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
863 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
864 
865 	rcu_cleanup_after_idle();
866 	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
867 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
868 	WRITE_ONCE(rdp->dynticks_nesting, 1);
869 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
870 	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
871 	instrumentation_end();
872 }
873 
874 /**
875  * rcu_idle_exit - inform RCU that current CPU is leaving idle
876  *
877  * Exit idle mode, in other words, -enter- the mode in which RCU
878  * read-side critical sections can occur.
879  *
880  * If you add or remove a call to rcu_idle_exit(), be sure to test with
881  * CONFIG_RCU_EQS_DEBUG=y.
882  */
883 void rcu_idle_exit(void)
884 {
885 	unsigned long flags;
886 
887 	local_irq_save(flags);
888 	rcu_eqs_exit(false);
889 	local_irq_restore(flags);
890 }
891 EXPORT_SYMBOL_GPL(rcu_idle_exit);
892 
893 #ifdef CONFIG_NO_HZ_FULL
894 /**
895  * rcu_user_exit - inform RCU that we are exiting userspace.
896  *
897  * Exit RCU idle mode while entering the kernel because it can
898  * run an RCU read-side critical section at any time.
899  *
900  * If you add or remove a call to rcu_user_exit(), be sure to test with
901  * CONFIG_RCU_EQS_DEBUG=y.
902  */
903 void noinstr rcu_user_exit(void)
904 {
905 	rcu_eqs_exit(1);
906 }
907 
908 /**
909  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
910  *
911  * The scheduler tick is not normally enabled when CPUs enter the kernel
912  * from nohz_full userspace execution.  After all, nohz_full userspace
913  * execution is an RCU quiescent state and the time executing in the kernel
914  * is quite short.  Except of course when it isn't.  And it is not hard to
915  * cause a large system to spend tens of seconds or even minutes looping
916  * in the kernel, which can cause a number of problems, including RCU CPU
917  * stall warnings.
918  *
919  * Therefore, if a nohz_full CPU fails to report a quiescent state
920  * in a timely manner, the RCU grace-period kthread sets that CPU's
921  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
922  * exception will invoke this function, which will turn on the scheduler
923  * tick, which will enable RCU to detect that CPU's quiescent states,
924  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
925  * The tick will be disabled once a quiescent state is reported for
926  * this CPU.
927  *
928  * Of course, in carefully tuned systems, there might never be an
929  * interrupt or exception.  In that case, the RCU grace-period kthread
930  * will eventually cause one to happen.  However, in less carefully
931  * controlled environments, this function allows RCU to get what it
932  * needs without creating otherwise useless interruptions.
933  */
934 void __rcu_irq_enter_check_tick(void)
935 {
936 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
937 
938 	// If we're here from NMI there's nothing to do.
939 	if (in_nmi())
940 		return;
941 
942 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
943 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
944 
945 	if (!tick_nohz_full_cpu(rdp->cpu) ||
946 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
947 	    READ_ONCE(rdp->rcu_forced_tick)) {
948 		// RCU doesn't need nohz_full help from this CPU, or it is
949 		// already getting that help.
950 		return;
951 	}
952 
953 	// We get here only when not in an extended quiescent state and
954 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
955 	// already watching and (2) The fact that we are in an interrupt
956 	// handler and that the rcu_node lock is an irq-disabled lock
957 	// prevents self-deadlock.  So we can safely recheck under the lock.
958 	// Note that the nohz_full state currently cannot change.
959 	raw_spin_lock_rcu_node(rdp->mynode);
960 	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
961 		// A nohz_full CPU is in the kernel and RCU needs a
962 		// quiescent state.  Turn on the tick!
963 		WRITE_ONCE(rdp->rcu_forced_tick, true);
964 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
965 	}
966 	raw_spin_unlock_rcu_node(rdp->mynode);
967 }
968 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
969 #endif /* CONFIG_NO_HZ_FULL */
970 
971 /**
972  * rcu_nmi_enter - inform RCU of entry to NMI context
973  *
974  * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
975  * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
976  * that the CPU is active.  This implementation permits nested NMIs, as
977  * long as the nesting level does not overflow an int.  (You will probably
978  * run out of stack space first.)
979  *
980  * If you add or remove a call to rcu_nmi_enter(), be sure to test
981  * with CONFIG_RCU_EQS_DEBUG=y.
982  */
983 noinstr void rcu_nmi_enter(void)
984 {
985 	long incby = 2;
986 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
987 
988 	/* Complain about underflow. */
989 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
990 
991 	/*
992 	 * If idle from RCU viewpoint, atomically increment ->dynticks
993 	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
994 	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
995 	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
996 	 * to be in the outermost NMI handler that interrupted an RCU-idle
997 	 * period (observation due to Andy Lutomirski).
998 	 */
999 	if (rcu_dynticks_curr_cpu_in_eqs()) {
1000 
1001 		if (!in_nmi())
1002 			rcu_dynticks_task_exit();
1003 
1004 		// RCU is not watching here ...
1005 		rcu_dynticks_eqs_exit();
1006 		// ... but is watching here.
1007 
1008 		if (!in_nmi()) {
1009 			instrumentation_begin();
1010 			rcu_cleanup_after_idle();
1011 			instrumentation_end();
1012 		}
1013 
1014 		instrumentation_begin();
1015 		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1016 		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1017 		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
1018 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1019 
1020 		incby = 1;
1021 	} else if (!in_nmi()) {
1022 		instrumentation_begin();
1023 		rcu_irq_enter_check_tick();
1024 	} else  {
1025 		instrumentation_begin();
1026 	}
1027 
1028 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1029 			  rdp->dynticks_nmi_nesting,
1030 			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1031 	instrumentation_end();
1032 	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1033 		   rdp->dynticks_nmi_nesting + incby);
1034 	barrier();
1035 }
1036 
1037 /**
1038  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1039  *
1040  * Enter an interrupt handler, which might possibly result in exiting
1041  * idle mode, in other words, entering the mode in which read-side critical
1042  * sections can occur.  The caller must have disabled interrupts.
1043  *
1044  * Note that the Linux kernel is fully capable of entering an interrupt
1045  * handler that it never exits, for example when doing upcalls to user mode!
1046  * This code assumes that the idle loop never does upcalls to user mode.
1047  * If your architecture's idle loop does do upcalls to user mode (or does
1048  * anything else that results in unbalanced calls to the irq_enter() and
1049  * irq_exit() functions), RCU will give you what you deserve, good and hard.
1050  * But very infrequently and irreproducibly.
1051  *
1052  * Use things like work queues to work around this limitation.
1053  *
1054  * You have been warned.
1055  *
1056  * If you add or remove a call to rcu_irq_enter(), be sure to test with
1057  * CONFIG_RCU_EQS_DEBUG=y.
1058  */
1059 noinstr void rcu_irq_enter(void)
1060 {
1061 	lockdep_assert_irqs_disabled();
1062 	rcu_nmi_enter();
1063 }
1064 
1065 /*
1066  * Wrapper for rcu_irq_enter() where interrupts are enabled.
1067  *
1068  * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1069  * with CONFIG_RCU_EQS_DEBUG=y.
1070  */
1071 void rcu_irq_enter_irqson(void)
1072 {
1073 	unsigned long flags;
1074 
1075 	local_irq_save(flags);
1076 	rcu_irq_enter();
1077 	local_irq_restore(flags);
1078 }
1079 
1080 /*
1081  * If any sort of urgency was applied to the current CPU (for example,
1082  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1083  * to get to a quiescent state, disable it.
1084  */
1085 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1086 {
1087 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
1088 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
1089 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1090 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1091 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1092 		WRITE_ONCE(rdp->rcu_forced_tick, false);
1093 	}
1094 }
1095 
1096 /**
1097  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1098  *
1099  * Return true if RCU is watching the running CPU, which means that this
1100  * CPU can safely enter RCU read-side critical sections.  In other words,
1101  * if the current CPU is not in its idle loop or is in an interrupt or
1102  * NMI handler, return true.
1103  *
1104  * Make notrace because it can be called by the internal functions of
1105  * ftrace, and making this notrace removes unnecessary recursion calls.
1106  */
1107 notrace bool rcu_is_watching(void)
1108 {
1109 	bool ret;
1110 
1111 	preempt_disable_notrace();
1112 	ret = !rcu_dynticks_curr_cpu_in_eqs();
1113 	preempt_enable_notrace();
1114 	return ret;
1115 }
1116 EXPORT_SYMBOL_GPL(rcu_is_watching);
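/*
 * Minimal usage sketch (editor's addition, hypothetical function name):
 * code that may run from idle or early entry paths can use this check
 * to skip RCU readers that would otherwise be illegal.
 */
#if 0
static void example_trace_hook(void)
{
	if (!rcu_is_watching())
		return;		/* EQS: rcu_read_lock() is not legal here. */
	rcu_read_lock();
	/* ... access RCU-protected data ... */
	rcu_read_unlock();
}
#endif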
1117 
1118 /*
1119  * If a holdout task is actually running, request an urgent quiescent
1120  * state from its CPU.  This is unsynchronized, so migrations can cause
1121  * the request to go to the wrong CPU.  Which is OK, all that will happen
1122  * is that the CPU's next context switch will be a bit slower and next
1123  * time around this task will generate another request.
1124  */
1125 void rcu_request_urgent_qs_task(struct task_struct *t)
1126 {
1127 	int cpu;
1128 
1129 	barrier();
1130 	cpu = task_cpu(t);
1131 	if (!task_curr(t))
1132 		return; /* This task is not running on that CPU. */
1133 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1134 }
1135 
1136 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1137 
1138 /*
1139  * Is the current CPU online as far as RCU is concerned?
1140  *
1141  * Disable preemption to avoid false positives that could otherwise
1142  * happen due to the current CPU number being sampled, this task being
1143  * preempted, its old CPU being taken offline, resuming on some other CPU,
1144  * then determining that its old CPU is now offline.
1145  *
1146  * Disable checking if in an NMI handler because we cannot safely
1147  * report errors from NMI handlers anyway.  In addition, it is OK to use
1148  * RCU on an offline processor during initial boot, hence the check for
1149  * rcu_scheduler_fully_active.
1150  */
1151 bool rcu_lockdep_current_cpu_online(void)
1152 {
1153 	struct rcu_data *rdp;
1154 	struct rcu_node *rnp;
1155 	bool ret = false;
1156 
1157 	if (in_nmi() || !rcu_scheduler_fully_active)
1158 		return true;
1159 	preempt_disable_notrace();
1160 	rdp = this_cpu_ptr(&rcu_data);
1161 	rnp = rdp->mynode;
1162 	if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
1163 		ret = true;
1164 	preempt_enable_notrace();
1165 	return ret;
1166 }
1167 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1168 
1169 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1170 
1171 /*
1172  * We are reporting a quiescent state on behalf of some other CPU, so
1173  * it is our responsibility to check for and handle potential overflow
1174  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1175  * After all, the CPU might be in deep idle state, and thus executing no
1176  * code whatsoever.
1177  */
1178 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1179 {
1180 	raw_lockdep_assert_held_rcu_node(rnp);
1181 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1182 			 rnp->gp_seq))
1183 		WRITE_ONCE(rdp->gpwrap, true);
1184 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1185 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
1186 }
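/*
 * Editor's note on the wrap-safe comparisons above: ULONG_CMP_LT() and
 * friends (kernel/rcu/rcu.h) compare via unsigned difference, roughly
 * ULONG_CMP_LT(a, b) == (ULONG_MAX / 2 < (a) - (b)), so ordering remains
 * correct across counter wrap.  For example, a == ULONG_MAX with b == 2
 * gives a - b > ULONG_MAX / 2, correctly treating a as "before" b.
 */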
1187 
1188 /*
1189  * Snapshot the specified CPU's dynticks counter so that we can later
1190  * credit them with an implicit quiescent state.  Return 1 if this CPU
1191  * is in dynticks idle mode, which is an extended quiescent state.
1192  */
1193 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1194 {
1195 	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1196 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1197 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1198 		rcu_gpnum_ovf(rdp->mynode, rdp);
1199 		return 1;
1200 	}
1201 	return 0;
1202 }
1203 
1204 /*
1205  * Return true if the specified CPU has passed through a quiescent
1206  * state by virtue of being in or having passed through a dynticks
1207  * idle state since the last call to dyntick_save_progress_counter()
1208  * for this same CPU, or by virtue of having been offline.
1209  */
1210 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1211 {
1212 	unsigned long jtsq;
1213 	bool *rnhqp;
1214 	bool *ruqp;
1215 	struct rcu_node *rnp = rdp->mynode;
1216 
1217 	/*
1218 	 * If the CPU passed through or entered a dynticks idle phase with
1219 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
1220 	 * already acknowledged the request to pass through a quiescent
1221 	 * state.  Either way, that CPU cannot possibly be in an RCU
1222 	 * read-side critical section that started before the beginning
1223 	 * of the current RCU grace period.
1224 	 */
1225 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1226 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1227 		rcu_gpnum_ovf(rnp, rdp);
1228 		return 1;
1229 	}
1230 
1231 	/*
1232 	 * Complain if a CPU that is considered to be offline from RCU's
1233 	 * perspective has not yet reported a quiescent state.  After all,
1234 	 * the offline CPU should have reported a quiescent state during
1235 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
1236 	 * if it ran concurrently with either the CPU going offline or the
1237 	 * last task on a leaf rcu_node structure exiting its RCU read-side
1238 	 * critical section while all CPUs corresponding to that structure
1239 	 * are offline.  This added warning detects bugs in any of these
1240 	 * code paths.
1241 	 *
1242 	 * The rcu_node structure's ->lock is held here, which excludes
1243 	 * the relevant portions the CPU-hotplug code, the grace-period
1244 	 * initialization code, and the rcu_read_unlock() code paths.
1245 	 *
1246 	 * For more detail, please refer to the "Hotplug CPU" section
1247 	 * of RCU's Requirements documentation.
1248 	 */
1249 	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1250 		bool onl;
1251 		struct rcu_node *rnp1;
1252 
1253 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1254 			__func__, rnp->grplo, rnp->grphi, rnp->level,
1255 			(long)rnp->gp_seq, (long)rnp->completedqs);
1256 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1257 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1258 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1259 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1260 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1261 			__func__, rdp->cpu, ".o"[onl],
1262 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1263 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1264 		return 1; /* Break things loose after complaining. */
1265 	}
1266 
1267 	/*
1268 	 * A CPU running for an extended time within the kernel can
1269 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1270 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1271 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
1272 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1273 	 * variable are safe because the assignments are repeated if this
1274 	 * CPU failed to pass through a quiescent state.  This code
1275 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
1276 	 * is set way high.
1277 	 */
1278 	jtsq = READ_ONCE(jiffies_to_sched_qs);
1279 	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1280 	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1281 	if (!READ_ONCE(*rnhqp) &&
1282 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1283 	     time_after(jiffies, rcu_state.jiffies_resched) ||
1284 	     rcu_state.cbovld)) {
1285 		WRITE_ONCE(*rnhqp, true);
1286 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1287 		smp_store_release(ruqp, true);
1288 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1289 		WRITE_ONCE(*ruqp, true);
1290 	}
1291 
1292 	/*
1293 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1294 	 * The above code handles this, but only for straight cond_resched().
1295 	 * And some in-kernel loops check need_resched() before calling
1296 	 * cond_resched(), which defeats the above code for CPUs that are
1297 	 * running in-kernel with scheduling-clock interrupts disabled.
1298 	 * So hit them over the head with the resched_cpu() hammer!
1299 	 */
1300 	if (tick_nohz_full_cpu(rdp->cpu) &&
1301 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1302 	     rcu_state.cbovld)) {
1303 		WRITE_ONCE(*ruqp, true);
1304 		resched_cpu(rdp->cpu);
1305 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1306 	}
1307 
1308 	/*
1309 	 * If more than halfway to RCU CPU stall-warning time, invoke
1310 	 * resched_cpu() more frequently to try to loosen things up a bit.
1311 	 * Also check to see if the CPU is getting hammered with interrupts,
1312 	 * but only once per grace period, just to keep the IPIs down to
1313 	 * a dull roar.
1314 	 */
1315 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
1316 		if (time_after(jiffies,
1317 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1318 			resched_cpu(rdp->cpu);
1319 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1320 		}
1321 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1322 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1323 		    (rnp->ffmask & rdp->grpmask)) {
1324 			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1325 			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
1326 			rdp->rcu_iw_pending = true;
1327 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
1328 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1329 		}
1330 	}
1331 
1332 	return 0;
1333 }
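/*
 * Summary timeline of the escalation above (editor's addition), with
 * jtsq == jiffies_to_sched_qs and the grace period starting at time 0:
 * at jtsq, set .rcu_urgent_qs; at 2*jtsq (or at .jiffies_resched, or on
 * callback overload), also set .rcu_need_heavy_qs; on nohz_full CPUs,
 * invoke resched_cpu() from 3*jtsq onward; past .jiffies_resched, repeat
 * resched_cpu() and queue one irq_work per grace period to check for
 * interrupt storms.
 */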
1334 
1335 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1336 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1337 			      unsigned long gp_seq_req, const char *s)
1338 {
1339 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1340 				      gp_seq_req, rnp->level,
1341 				      rnp->grplo, rnp->grphi, s);
1342 }
1343 
1344 /*
1345  * rcu_start_this_gp - Request the start of a particular grace period
1346  * @rnp_start: The leaf node of the CPU from which to start.
1347  * @rdp: The rcu_data corresponding to the CPU from which to start.
1348  * @gp_seq_req: The gp_seq of the grace period to start.
1349  *
1350  * Start the specified grace period, as needed to handle newly arrived
1351  * callbacks.  The required future grace periods are recorded in each
1352  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
1353  * is reason to awaken the grace-period kthread.
1354  *
1355  * The caller must hold the specified rcu_node structure's ->lock, which
1356  * is why the caller is responsible for waking the grace-period kthread.
1357  *
1358  * Returns true if the GP thread needs to be awakened else false.
1359  */
1360 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1361 			      unsigned long gp_seq_req)
1362 {
1363 	bool ret = false;
1364 	struct rcu_node *rnp;
1365 
1366 	/*
1367 	 * Use funnel locking to either acquire the root rcu_node
1368 	 * structure's lock or bail out if the need for this grace period
1369 	 * has already been recorded -- or if that grace period has in
1370 	 * fact already started.  If there is already a grace period in
1371 	 * progress in a non-leaf node, no recording is needed because the
1372 	 * end of the grace period will scan the leaf rcu_node structures.
1373 	 * Note that rnp_start->lock must not be released.
1374 	 */
1375 	raw_lockdep_assert_held_rcu_node(rnp_start);
1376 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1377 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
1378 		if (rnp != rnp_start)
1379 			raw_spin_lock_rcu_node(rnp);
1380 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1381 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1382 		    (rnp != rnp_start &&
1383 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1384 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1385 					  TPS("Prestarted"));
1386 			goto unlock_out;
1387 		}
1388 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1389 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1390 			/*
1391 			 * We just marked the leaf or internal node, and a
1392 			 * grace period is in progress, which means that
1393 			 * rcu_gp_cleanup() will see the marking.  Bail to
1394 			 * reduce contention.
1395 			 */
1396 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1397 					  TPS("Startedleaf"));
1398 			goto unlock_out;
1399 		}
1400 		if (rnp != rnp_start && rnp->parent != NULL)
1401 			raw_spin_unlock_rcu_node(rnp);
1402 		if (!rnp->parent)
1403 			break;  /* At root, and perhaps also leaf. */
1404 	}
1405 
1406 	/* If GP already in progress, just leave, otherwise start one. */
1407 	if (rcu_gp_in_progress()) {
1408 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1409 		goto unlock_out;
1410 	}
1411 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1412 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1413 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1414 	if (!READ_ONCE(rcu_state.gp_kthread)) {
1415 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1416 		goto unlock_out;
1417 	}
1418 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1419 	ret = true;  /* Caller must wake GP kthread. */
1420 unlock_out:
1421 	/* Push furthest requested GP to leaf node and rcu_data structure. */
1422 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1423 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1424 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1425 	}
1426 	if (rnp != rnp_start)
1427 		raw_spin_unlock_rcu_node(rnp);
1428 	return ret;
1429 }
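/*
 * Concrete walk (editor's addition): on a two-level tree, a CPU records
 * gp_seq_needed in its leaf rnp_start, then acquires the root's lock
 * while still holding rnp_start's, bails if the request was already
 * recorded or the grace period already started, and otherwise sets
 * RCU_GP_FLAG_INIT so that the caller can wake the grace-period kthread.
 */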
1430 
1431 /*
1432  * Clean up any old requests for the just-ended grace period.  Also return
1433  * whether any additional grace periods have been requested.
1434  */
1435 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1436 {
1437 	bool needmore;
1438 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1439 
1440 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1441 	if (!needmore)
1442 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1443 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1444 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1445 	return needmore;
1446 }
1447 
1448 /*
1449  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1450  * interrupt or softirq handler, in which case we just might immediately
1451  * sleep upon return, resulting in a grace-period hang), and don't bother
1452  * awakening when there is nothing for the grace-period kthread to do
1453  * (as in several CPUs raced to awaken, we lost), and finally don't try
1454  * to awaken a kthread that has not yet been created.  If all those checks
1455  * are passed, track some debug information and awaken.
1456  *
1457  * So why do the self-wakeup when in an interrupt or softirq handler
1458  * in the grace-period kthread's context?  Because the kthread might have
1459  * been interrupted just as it was going to sleep, and just after the final
1460  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1461  * is required, and is therefore supplied.
1462  */
1463 static void rcu_gp_kthread_wake(void)
1464 {
1465 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1466 
1467 	if ((current == t && !in_irq() && !in_serving_softirq()) ||
1468 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1469 		return;
1470 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1471 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1472 	swake_up_one(&rcu_state.gp_wq);
1473 }
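/*
 * Editorial annotation: the race that the in_irq()/in_serving_softirq()
 * exception above handles, as a timeline:
 *
 *   GP kthread:  evaluates its wait condition one last time -> false
 *   interrupt:   fires on that CPU before the kthread actually sleeps,
 *                and its handler calls rcu_gp_kthread_wake() with
 *                current == t
 *
 * If that wakeup were suppressed as a self-wakeup, the kthread would go
 * to sleep having missed the condition change, hanging the grace period.
 */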
1474 
1475 /*
1476  * If there is room, assign a ->gp_seq number to any callbacks on this
1477  * CPU that have not already been assigned.  Also accelerate any callbacks
1478  * that were previously assigned a ->gp_seq number that has since proven
1479  * to be too conservative, which can happen if callbacks get assigned a
1480  * ->gp_seq number while RCU is idle, but with reference to a non-root
1481  * rcu_node structure.  This function is idempotent, so it does not hurt
1482  * to call it repeatedly.  Returns a flag saying that we should awaken
1483  * the RCU grace-period kthread.
1484  *
1485  * The caller must hold rnp->lock with interrupts disabled.
1486  */
1487 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1488 {
1489 	unsigned long gp_seq_req;
1490 	bool ret = false;
1491 
1492 	rcu_lockdep_assert_cblist_protected(rdp);
1493 	raw_lockdep_assert_held_rcu_node(rnp);
1494 
1495 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1496 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1497 		return false;
1498 
1499 	/*
1500 	 * Callbacks are often registered with incomplete grace-period
1501 	 * information.  Something about the fact that getting exact
1502 	 * information requires acquiring a global lock...  RCU therefore
1503 	 * makes a conservative estimate of the grace period number at which
1504 	 * a given callback will become ready to invoke.	The following
1505 	 * code checks this estimate and improves it when possible, thus
1506 	 * accelerating callback invocation to an earlier grace-period
1507 	 * number.
1508 	 */
1509 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1510 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1511 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1512 
1513 	/* Trace depending on how much we were able to accelerate. */
1514 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1515 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1516 	else
1517 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1518 
1519 	return ret;
1520 }
1521 
1522 /*
1523  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1524  * rcu_node structure's ->lock be held.  It consults the cached value
1525  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1526  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1527  * while holding the leaf rcu_node structure's ->lock.
1528  */
1529 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1530 					struct rcu_data *rdp)
1531 {
1532 	unsigned long c;
1533 	bool needwake;
1534 
1535 	rcu_lockdep_assert_cblist_protected(rdp);
1536 	c = rcu_seq_snap(&rcu_state.gp_seq);
1537 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1538 		/* Old request still live, so mark recent callbacks. */
1539 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1540 		return;
1541 	}
1542 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1543 	needwake = rcu_accelerate_cbs(rnp, rdp);
1544 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1545 	if (needwake)
1546 		rcu_gp_kthread_wake();
1547 }
1548 
1549 /*
1550  * Move any callbacks whose grace period has completed to the
1551  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1552  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1553  * sublist.  This function is idempotent, so it does not hurt to
1554  * invoke it repeatedly.  As long as it is not invoked -too- often...
1555  * Returns true if the RCU grace-period kthread needs to be awakened.
1556  *
1557  * The caller must hold rnp->lock with interrupts disabled.
1558  */
1559 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1560 {
1561 	rcu_lockdep_assert_cblist_protected(rdp);
1562 	raw_lockdep_assert_held_rcu_node(rnp);
1563 
1564 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1565 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1566 		return false;
1567 
1568 	/*
1569 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1570 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1571 	 */
1572 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1573 
1574 	/* Classify any remaining callbacks. */
1575 	return rcu_accelerate_cbs(rnp, rdp);
1576 }
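/*
 * Editorial annotation: the four rcu_segcblist segments that the advance
 * and accelerate operations manipulate (see rcu_segcblist.h):
 *
 *   RCU_DONE_TAIL:       grace period ended; ready to invoke.
 *   RCU_WAIT_TAIL:       waiting on the current grace period.
 *   RCU_NEXT_READY_TAIL: waiting on the next grace period.
 *   RCU_NEXT_TAIL:       not yet associated with any grace period.
 *
 * rcu_advance_cbs() moves newly ready callbacks into RCU_DONE_TAIL;
 * rcu_accelerate_cbs() assigns (or tightens) ->gp_seq numbers for the
 * remainder.
 */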
1577 
1578 /*
1579  * Move and classify callbacks, but only if doing so won't require
1580  * that the RCU grace-period kthread be awakened.
1581  */
1582 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1583 						  struct rcu_data *rdp)
1584 {
1585 	rcu_lockdep_assert_cblist_protected(rdp);
1586 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1587 		return;
1588 	// The grace period cannot end while we hold the rcu_node lock.
1589 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1590 		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1591 	raw_spin_unlock_rcu_node(rnp);
1592 }
1593 
1594 /*
1595  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1596  * quiescent state.  This is intended to be invoked when the CPU notices
1597  * a new grace period.
1598  */
1599 static void rcu_strict_gp_check_qs(void)
1600 {
1601 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1602 		rcu_read_lock();
1603 		rcu_read_unlock();
1604 	}
1605 }
1606 
1607 /*
1608  * Update CPU-local rcu_data state to record the beginnings and ends of
1609  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1610  * structure corresponding to the current CPU, and must have irqs disabled.
1611  * Returns true if the grace-period kthread needs to be awakened.
1612  */
1613 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1614 {
1615 	bool ret = false;
1616 	bool need_qs;
1617 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1618 			       rcu_segcblist_is_offloaded(&rdp->cblist);
1619 
1620 	raw_lockdep_assert_held_rcu_node(rnp);
1621 
1622 	if (rdp->gp_seq == rnp->gp_seq)
1623 		return false; /* Nothing to do. */
1624 
1625 	/* Handle the ends of any preceding grace periods first. */
1626 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1627 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1628 		if (!offloaded)
1629 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1630 		rdp->core_needs_qs = false;
1631 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1632 	} else {
1633 		if (!offloaded)
1634 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1635 		if (rdp->core_needs_qs)
1636 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1637 	}
1638 
1639 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1640 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1641 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1642 		/*
1643 		 * If the current grace period is waiting for this CPU,
1644 		 * set up to detect a quiescent state, otherwise don't
1645 		 * go looking for one.
1646 		 */
1647 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1648 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1649 		rdp->cpu_no_qs.b.norm = need_qs;
1650 		rdp->core_needs_qs = need_qs;
1651 		zero_cpu_stall_ticks(rdp);
1652 	}
1653 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1654 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1655 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1656 	WRITE_ONCE(rdp->gpwrap, false);
1657 	rcu_gpnum_ovf(rnp, rdp);
1658 	return ret;
1659 }
1660 
1661 static void note_gp_changes(struct rcu_data *rdp)
1662 {
1663 	unsigned long flags;
1664 	bool needwake;
1665 	struct rcu_node *rnp;
1666 
1667 	local_irq_save(flags);
1668 	rnp = rdp->mynode;
1669 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1670 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1671 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1672 		local_irq_restore(flags);
1673 		return;
1674 	}
1675 	needwake = __note_gp_changes(rnp, rdp);
1676 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1677 	rcu_strict_gp_check_qs();
1678 	if (needwake)
1679 		rcu_gp_kthread_wake();
1680 }
1681 
1682 static void rcu_gp_slow(int delay)
1683 {
1684 	if (delay > 0 &&
1685 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1686 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1687 		schedule_timeout_idle(delay);
1688 }
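/*
 * Editorial example: rcu_gp_slow() deliberately sleeps on only a small
 * fraction of grace periods.  Assuming PER_RCU_NODE_PERIOD == 3 (see
 * tree.h), a 17-node tree with delay == 4 would sleep once every
 * 17 * 3 * 4 = 204 grace periods, so debug delays do not slow every
 * grace period.
 */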
1689 
1690 static unsigned long sleep_duration;
1691 
1692 /* Allow rcutorture to stall the grace-period kthread. */
1693 void rcu_gp_set_torture_wait(int duration)
1694 {
1695 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1696 		WRITE_ONCE(sleep_duration, duration);
1697 }
1698 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1699 
1700 /* Actually implement the aforementioned wait. */
1701 static void rcu_gp_torture_wait(void)
1702 {
1703 	unsigned long duration;
1704 
1705 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1706 		return;
1707 	duration = xchg(&sleep_duration, 0UL);
1708 	if (duration > 0) {
1709 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1710 		schedule_timeout_idle(duration);
1711 		pr_alert("%s: Wait complete\n", __func__);
1712 	}
1713 }
1714 
1715 /*
1716  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1717  * processing.
1718  */
1719 static void rcu_strict_gp_boundary(void *unused)
1720 {
1721 	invoke_rcu_core();
1722 }
1723 
1724 /*
1725  * Initialize a new grace period.  Return false if no grace period required.
1726  */
1727 static bool rcu_gp_init(void)
1728 {
1729 	unsigned long firstseq;
1730 	unsigned long flags;
1731 	unsigned long oldmask;
1732 	unsigned long mask;
1733 	struct rcu_data *rdp;
1734 	struct rcu_node *rnp = rcu_get_root();
1735 
1736 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1737 	raw_spin_lock_irq_rcu_node(rnp);
1738 	if (!READ_ONCE(rcu_state.gp_flags)) {
1739 		/* Spurious wakeup, tell caller to go back to sleep.  */
1740 		raw_spin_unlock_irq_rcu_node(rnp);
1741 		return false;
1742 	}
1743 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1744 
1745 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1746 		/*
1747 		 * Grace period already in progress, don't start another.
1748 		 * Not supposed to be able to happen.
1749 		 */
1750 		raw_spin_unlock_irq_rcu_node(rnp);
1751 		return false;
1752 	}
1753 
1754 	/* Advance to a new grace period and initialize state. */
1755 	record_gp_stall_check_time();
1756 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1757 	rcu_seq_start(&rcu_state.gp_seq);
1758 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1759 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1760 	raw_spin_unlock_irq_rcu_node(rnp);
1761 
1762 	/*
1763 	 * Apply per-leaf buffered online and offline operations to
1764 	 * the rcu_node tree. Note that this new grace period need not
1765 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1766 	 * offlining path, when combined with checks in this function,
1767 	 * will handle CPUs that are currently going offline or that will
1768 	 * go offline later.  Please also refer to "Hotplug CPU" section
1769 	 * of RCU's Requirements documentation.
1770 	 */
1771 	rcu_state.gp_state = RCU_GP_ONOFF;
1772 	rcu_for_each_leaf_node(rnp) {
1773 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1774 		firstseq = READ_ONCE(rnp->ofl_seq);
1775 		if (firstseq & 0x1)
1776 			while (firstseq == READ_ONCE(rnp->ofl_seq))
1777 				schedule_timeout_idle(1);  // Can't wake unless RCU is watching.
1778 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1779 		raw_spin_lock(&rcu_state.ofl_lock);
1780 		raw_spin_lock_irq_rcu_node(rnp);
1781 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1782 		    !rnp->wait_blkd_tasks) {
1783 			/* Nothing to do on this leaf rcu_node structure. */
1784 			raw_spin_unlock_irq_rcu_node(rnp);
1785 			raw_spin_unlock(&rcu_state.ofl_lock);
1786 			continue;
1787 		}
1788 
1789 		/* Record old state, apply changes to ->qsmaskinit field. */
1790 		oldmask = rnp->qsmaskinit;
1791 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1792 
1793 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1794 		if (!oldmask != !rnp->qsmaskinit) {
1795 			if (!oldmask) { /* First online CPU for rcu_node. */
1796 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1797 					rcu_init_new_rnp(rnp);
1798 			} else if (rcu_preempt_has_tasks(rnp)) {
1799 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1800 			} else { /* Last offline CPU and can propagate. */
1801 				rcu_cleanup_dead_rnp(rnp);
1802 			}
1803 		}
1804 
1805 		/*
1806 		 * If all waited-on tasks from prior grace period are
1807 		 * done, and if all this rcu_node structure's CPUs are
1808 		 * still offline, propagate up the rcu_node tree and
1809 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1810 		 * rcu_node structure's CPUs has since come back online,
1811 		 * simply clear ->wait_blkd_tasks.
1812 		 */
1813 		if (rnp->wait_blkd_tasks &&
1814 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1815 			rnp->wait_blkd_tasks = false;
1816 			if (!rnp->qsmaskinit)
1817 				rcu_cleanup_dead_rnp(rnp);
1818 		}
1819 
1820 		raw_spin_unlock_irq_rcu_node(rnp);
1821 		raw_spin_unlock(&rcu_state.ofl_lock);
1822 	}
1823 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1824 
1825 	/*
1826 	 * Set the quiescent-state-needed bits in all the rcu_node
1827 	 * structures for all currently online CPUs in breadth-first
1828 	 * order, starting from the root rcu_node structure, relying on the
1829 	 * layout of the tree within the rcu_state.node[] array.  Note that
1830 	 * other CPUs will access only the leaves of the hierarchy, thus
1831 	 * seeing that no grace period is in progress, at least until the
1832 	 * corresponding leaf node has been initialized.
1833 	 *
1834 	 * The grace period cannot complete until the initialization
1835 	 * process finishes, because this kthread handles both.
1836 	 */
1837 	rcu_state.gp_state = RCU_GP_INIT;
1838 	rcu_for_each_node_breadth_first(rnp) {
1839 		rcu_gp_slow(gp_init_delay);
1840 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1841 		rdp = this_cpu_ptr(&rcu_data);
1842 		rcu_preempt_check_blocked_tasks(rnp);
1843 		rnp->qsmask = rnp->qsmaskinit;
1844 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1845 		if (rnp == rdp->mynode)
1846 			(void)__note_gp_changes(rnp, rdp);
1847 		rcu_preempt_boost_start_gp(rnp);
1848 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1849 					    rnp->level, rnp->grplo,
1850 					    rnp->grphi, rnp->qsmask);
1851 		/* Quiescent states for tasks on any now-offline CPUs. */
1852 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1853 		rnp->rcu_gp_init_mask = mask;
1854 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1855 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1856 		else
1857 			raw_spin_unlock_irq_rcu_node(rnp);
1858 		cond_resched_tasks_rcu_qs();
1859 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1860 	}
1861 
1862 	// If strict, make all CPUs aware of new grace period.
1863 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1864 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1865 
1866 	return true;
1867 }
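/*
 * Editorial annotation: rcu_gp_init() thus proceeds in three phases:
 * (1) validate the wakeup and advance ->gp_seq under the root lock,
 * (2) RCU_GP_ONOFF: fold buffered CPU-hotplug transitions into each
 *     leaf's ->qsmaskinit, and (3) RCU_GP_INIT: breadth-first setting
 * of ->qsmask and ->gp_seq throughout the rcu_node tree.
 */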
1868 
1869 /*
1870  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1871  * time.
1872  */
1873 static bool rcu_gp_fqs_check_wake(int *gfp)
1874 {
1875 	struct rcu_node *rnp = rcu_get_root();
1876 
1877 	// If under overload conditions, force an immediate FQS scan.
1878 	if (*gfp & RCU_GP_FLAG_OVLD)
1879 		return true;
1880 
1881 	// Someone like call_rcu() requested a force-quiescent-state scan.
1882 	*gfp = READ_ONCE(rcu_state.gp_flags);
1883 	if (*gfp & RCU_GP_FLAG_FQS)
1884 		return true;
1885 
1886 	// The current grace period has completed.
1887 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1888 		return true;
1889 
1890 	return false;
1891 }
1892 
1893 /*
1894  * Do one round of quiescent-state forcing.
1895  */
1896 static void rcu_gp_fqs(bool first_time)
1897 {
1898 	struct rcu_node *rnp = rcu_get_root();
1899 
1900 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1901 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1902 	if (first_time) {
1903 		/* Collect dyntick-idle snapshots. */
1904 		force_qs_rnp(dyntick_save_progress_counter);
1905 	} else {
1906 		/* Handle dyntick-idle and offline CPUs. */
1907 		force_qs_rnp(rcu_implicit_dynticks_qs);
1908 	}
1909 	/* Clear flag to prevent immediate re-entry. */
1910 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1911 		raw_spin_lock_irq_rcu_node(rnp);
1912 		WRITE_ONCE(rcu_state.gp_flags,
1913 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1914 		raw_spin_unlock_irq_rcu_node(rnp);
1915 	}
1916 }
1917 
1918 /*
1919  * Loop doing repeated quiescent-state forcing until the grace period ends.
1920  */
1921 static void rcu_gp_fqs_loop(void)
1922 {
1923 	bool first_gp_fqs;
1924 	int gf = 0;
1925 	unsigned long j;
1926 	int ret;
1927 	struct rcu_node *rnp = rcu_get_root();
1928 
1929 	first_gp_fqs = true;
1930 	j = READ_ONCE(jiffies_till_first_fqs);
1931 	if (rcu_state.cbovld)
1932 		gf = RCU_GP_FLAG_OVLD;
1933 	ret = 0;
1934 	for (;;) {
1935 		if (!ret) {
1936 			rcu_state.jiffies_force_qs = jiffies + j;
1937 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1938 				   jiffies + (j ? 3 * j : 2));
1939 		}
1940 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1941 				       TPS("fqswait"));
1942 		rcu_state.gp_state = RCU_GP_WAIT_FQS;
1943 		ret = swait_event_idle_timeout_exclusive(
1944 				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1945 		rcu_gp_torture_wait();
1946 		rcu_state.gp_state = RCU_GP_DOING_FQS;
1947 		/* Locking provides needed memory barriers. */
1948 		/* If grace period done, leave loop. */
1949 		if (!READ_ONCE(rnp->qsmask) &&
1950 		    !rcu_preempt_blocked_readers_cgp(rnp))
1951 			break;
1952 		/* If time for quiescent-state forcing, do it. */
1953 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1954 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1955 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1956 					       TPS("fqsstart"));
1957 			rcu_gp_fqs(first_gp_fqs);
1958 			gf = 0;
1959 			if (first_gp_fqs) {
1960 				first_gp_fqs = false;
1961 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1962 			}
1963 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1964 					       TPS("fqsend"));
1965 			cond_resched_tasks_rcu_qs();
1966 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1967 			ret = 0; /* Force full wait till next FQS. */
1968 			j = READ_ONCE(jiffies_till_next_fqs);
1969 		} else {
1970 			/* Deal with stray signal. */
1971 			cond_resched_tasks_rcu_qs();
1972 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1973 			WARN_ON(signal_pending(current));
1974 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1975 					       TPS("fqswaitsig"));
1976 			ret = 1; /* Keep old FQS timing. */
1977 			j = jiffies;
1978 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1979 				j = 1;
1980 			else
1981 				j = rcu_state.jiffies_force_qs - j;
1982 			gf = 0;
1983 		}
1984 	}
1985 }
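/*
 * Editorial annotation on "ret" in the loop above: a zero value
 * (timeout, or just after an FQS pass) recomputes the full
 * jiffies_force_qs deadline, while a nonzero value (woken without work,
 * for example by a stray signal) keeps the old deadline and sets j to
 * the time remaining until it, so that spurious wakeups do not postpone
 * quiescent-state forcing.
 */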
1986 
1987 /*
1988  * Clean up after the old grace period.
1989  */
1990 static void rcu_gp_cleanup(void)
1991 {
1992 	int cpu;
1993 	bool needgp = false;
1994 	unsigned long gp_duration;
1995 	unsigned long new_gp_seq;
1996 	bool offloaded;
1997 	struct rcu_data *rdp;
1998 	struct rcu_node *rnp = rcu_get_root();
1999 	struct swait_queue_head *sq;
2000 
2001 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2002 	raw_spin_lock_irq_rcu_node(rnp);
2003 	rcu_state.gp_end = jiffies;
2004 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2005 	if (gp_duration > rcu_state.gp_max)
2006 		rcu_state.gp_max = gp_duration;
2007 
2008 	/*
2009 	 * We know the grace period is complete, but to everyone else
2010 	 * it appears to still be ongoing.  But it is also the case
2011 	 * that to everyone else it looks like there is nothing that
2012 	 * they can do to advance the grace period.  It is therefore
2013 	 * safe for us to drop the lock in order to mark the grace
2014 	 * period as completed in all of the rcu_node structures.
2015 	 */
2016 	raw_spin_unlock_irq_rcu_node(rnp);
2017 
2018 	/*
2019 	 * Propagate new ->gp_seq value to rcu_node structures so that
2020 	 * other CPUs don't have to wait until the start of the next grace
2021 	 * period to process their callbacks.  This also avoids some nasty
2022 	 * RCU grace-period initialization races by forcing the end of
2023 	 * the current grace period to be completely recorded in all of
2024 	 * the rcu_node structures before the beginning of the next grace
2025 	 * period is recorded in any of the rcu_node structures.
2026 	 */
2027 	new_gp_seq = rcu_state.gp_seq;
2028 	rcu_seq_end(&new_gp_seq);
2029 	rcu_for_each_node_breadth_first(rnp) {
2030 		raw_spin_lock_irq_rcu_node(rnp);
2031 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2032 			dump_blkd_tasks(rnp, 10);
2033 		WARN_ON_ONCE(rnp->qsmask);
2034 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2035 		rdp = this_cpu_ptr(&rcu_data);
2036 		if (rnp == rdp->mynode)
2037 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2038 		/* smp_mb() provided by prior unlock-lock pair. */
2039 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2040 		// Reset overload indication for CPUs no longer overloaded
2041 		if (rcu_is_leaf_node(rnp))
2042 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2043 				rdp = per_cpu_ptr(&rcu_data, cpu);
2044 				check_cb_ovld_locked(rdp, rnp);
2045 			}
2046 		sq = rcu_nocb_gp_get(rnp);
2047 		raw_spin_unlock_irq_rcu_node(rnp);
2048 		rcu_nocb_gp_cleanup(sq);
2049 		cond_resched_tasks_rcu_qs();
2050 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2051 		rcu_gp_slow(gp_cleanup_delay);
2052 	}
2053 	rnp = rcu_get_root();
2054 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2055 
2056 	/* Declare grace period done, trace first to use old GP number. */
2057 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2058 	rcu_seq_end(&rcu_state.gp_seq);
2059 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2060 	rcu_state.gp_state = RCU_GP_IDLE;
2061 	/* Check for GP requests since above loop. */
2062 	rdp = this_cpu_ptr(&rcu_data);
2063 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2064 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2065 				  TPS("CleanupMore"));
2066 		needgp = true;
2067 	}
2068 	/* Advance CBs to reduce false positives below. */
2069 	offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2070 		    rcu_segcblist_is_offloaded(&rdp->cblist);
2071 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2072 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2073 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2074 		trace_rcu_grace_period(rcu_state.name,
2075 				       rcu_state.gp_seq,
2076 				       TPS("newreq"));
2077 	} else {
2078 		WRITE_ONCE(rcu_state.gp_flags,
2079 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2080 	}
2081 	raw_spin_unlock_irq_rcu_node(rnp);
2082 
2083 	// If strict, make all CPUs aware of the end of the old grace period.
2084 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2085 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2086 }
2087 
2088 /*
2089  * Body of kthread that handles grace periods.
2090  */
2091 static int __noreturn rcu_gp_kthread(void *unused)
2092 {
2093 	rcu_bind_gp_kthread();
2094 	for (;;) {
2095 
2096 		/* Handle grace-period start. */
2097 		for (;;) {
2098 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2099 					       TPS("reqwait"));
2100 			rcu_state.gp_state = RCU_GP_WAIT_GPS;
2101 			swait_event_idle_exclusive(rcu_state.gp_wq,
2102 					 READ_ONCE(rcu_state.gp_flags) &
2103 					 RCU_GP_FLAG_INIT);
2104 			rcu_gp_torture_wait();
2105 			rcu_state.gp_state = RCU_GP_DONE_GPS;
2106 			/* Locking provides needed memory barrier. */
2107 			if (rcu_gp_init())
2108 				break;
2109 			cond_resched_tasks_rcu_qs();
2110 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2111 			WARN_ON(signal_pending(current));
2112 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2113 					       TPS("reqwaitsig"));
2114 		}
2115 
2116 		/* Handle quiescent-state forcing. */
2117 		rcu_gp_fqs_loop();
2118 
2119 		/* Handle grace-period end. */
2120 		rcu_state.gp_state = RCU_GP_CLEANUP;
2121 		rcu_gp_cleanup();
2122 		rcu_state.gp_state = RCU_GP_CLEANED;
2123 	}
2124 }
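/*
 * Editorial annotation: the GP kthread's state machine, as driven by
 * the loop above:
 *
 *   RCU_GP_WAIT_GPS  -> waiting for a grace-period request
 *   RCU_GP_DONE_GPS  -> request seen; rcu_gp_init() runs (and itself
 *                       passes through RCU_GP_ONOFF and RCU_GP_INIT)
 *   RCU_GP_WAIT_FQS  -> waiting for an FQS timeout or request
 *   RCU_GP_DOING_FQS -> one pass of quiescent-state forcing
 *   RCU_GP_CLEANUP   -> rcu_gp_cleanup() runs (which sets RCU_GP_IDLE)
 *   RCU_GP_CLEANED   -> momentarily, before returning to RCU_GP_WAIT_GPS
 */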
2125 
2126 /*
2127  * Report a full set of quiescent states to the rcu_state data structure.
2128  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2129  * another grace period is required.  Whether we wake the grace-period
2130  * kthread or it awakens itself for the next round of quiescent-state
2131  * forcing, that kthread will clean up after the just-completed grace
2132  * period.  Note that the caller must hold rnp->lock, which is released
2133  * before return.
2134  */
2135 static void rcu_report_qs_rsp(unsigned long flags)
2136 	__releases(rcu_get_root()->lock)
2137 {
2138 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2139 	WARN_ON_ONCE(!rcu_gp_in_progress());
2140 	WRITE_ONCE(rcu_state.gp_flags,
2141 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2142 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2143 	rcu_gp_kthread_wake();
2144 }
2145 
2146 /*
2147  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2148  * Allows quiescent states for a group of CPUs to be reported at one go
2149  * to the specified rcu_node structure, though all the CPUs in the group
2150  * must be represented by the same rcu_node structure (which need not be a
2151  * leaf rcu_node structure, though it often will be).  The gps parameter
2152  * is the grace-period snapshot, which means that the quiescent states
2153  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2154  * must be held upon entry, and it is released before return.
2155  *
2156  * As a special case, if mask is zero, the bit-already-cleared check is
2157  * disabled.  This allows propagating quiescent state due to resumed tasks
2158  * during grace-period initialization.
2159  */
2160 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2161 			      unsigned long gps, unsigned long flags)
2162 	__releases(rnp->lock)
2163 {
2164 	unsigned long oldmask = 0;
2165 	struct rcu_node *rnp_c;
2166 
2167 	raw_lockdep_assert_held_rcu_node(rnp);
2168 
2169 	/* Walk up the rcu_node hierarchy. */
2170 	for (;;) {
2171 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2172 
2173 			/*
2174 			 * Our bit has already been cleared, or the
2175 			 * relevant grace period is already over, so done.
2176 			 */
2177 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2178 			return;
2179 		}
2180 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2181 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2182 			     rcu_preempt_blocked_readers_cgp(rnp));
2183 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2184 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2185 						 mask, rnp->qsmask, rnp->level,
2186 						 rnp->grplo, rnp->grphi,
2187 						 !!rnp->gp_tasks);
2188 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2189 
2190 			/* Other bits still set at this level, so done. */
2191 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2192 			return;
2193 		}
2194 		rnp->completedqs = rnp->gp_seq;
2195 		mask = rnp->grpmask;
2196 		if (rnp->parent == NULL) {
2197 
2198 			/* No more levels.  Exit loop holding root lock. */
2199 
2200 			break;
2201 		}
2202 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2203 		rnp_c = rnp;
2204 		rnp = rnp->parent;
2205 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2206 		oldmask = READ_ONCE(rnp_c->qsmask);
2207 	}
2208 
2209 	/*
2210 	 * Get here if we are the last CPU to pass through a quiescent
2211 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2212 	 * to clean up and start the next grace period if one is needed.
2213 	 */
2214 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2215 }
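/*
 * Editorial example: on a two-level tree, when the last CPU of a leaf
 * reports its quiescent state, that leaf's ->qsmask reaches zero and
 * the loop above clears the leaf's ->grpmask bit in the root's
 * ->qsmask.  When the root's ->qsmask in turn reaches zero (and no
 * readers block the grace period), rcu_report_qs_rsp() ends it.
 */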
2216 
2217 /*
2218  * Record a quiescent state for all tasks that were previously queued
2219  * on the specified rcu_node structure and that were blocking the current
2220  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2221  * irqs disabled, and this lock is released upon return, but irqs remain
2222  * disabled.
2223  */
2224 static void __maybe_unused
2225 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2226 	__releases(rnp->lock)
2227 {
2228 	unsigned long gps;
2229 	unsigned long mask;
2230 	struct rcu_node *rnp_p;
2231 
2232 	raw_lockdep_assert_held_rcu_node(rnp);
2233 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2234 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2235 	    rnp->qsmask != 0) {
2236 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2237 		return;  /* Still need more quiescent states! */
2238 	}
2239 
2240 	rnp->completedqs = rnp->gp_seq;
2241 	rnp_p = rnp->parent;
2242 	if (rnp_p == NULL) {
2243 		/*
2244 		 * Only one rcu_node structure in the tree, so don't
2245 		 * try to report up to its nonexistent parent!
2246 		 */
2247 		rcu_report_qs_rsp(flags);
2248 		return;
2249 	}
2250 
2251 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2252 	gps = rnp->gp_seq;
2253 	mask = rnp->grpmask;
2254 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2255 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2256 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2257 }
2258 
2259 /*
2260  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2261  * structure.  This must be called from the specified CPU.
2262  */
2263 static void
2264 rcu_report_qs_rdp(struct rcu_data *rdp)
2265 {
2266 	unsigned long flags;
2267 	unsigned long mask;
2268 	bool needwake = false;
2269 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2270 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2271 	struct rcu_node *rnp;
2272 
2273 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2274 	rnp = rdp->mynode;
2275 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2276 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2277 	    rdp->gpwrap) {
2278 
2279 		/*
2280 		 * The grace period in which this quiescent state was
2281 		 * recorded has ended, so don't report it upwards.
2282 		 * We will instead need a new quiescent state that lies
2283 		 * within the current grace period.
2284 		 */
2285 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2286 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2287 		return;
2288 	}
2289 	mask = rdp->grpmask;
2290 	rdp->core_needs_qs = false;
2291 	if ((rnp->qsmask & mask) == 0) {
2292 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2293 	} else {
2294 		/*
2295 		 * This GP can't end until this CPU checks in, so all of our
2296 		 * callbacks can be processed during the next GP.
2297 		 */
2298 		if (!offloaded)
2299 			needwake = rcu_accelerate_cbs(rnp, rdp);
2300 
2301 		rcu_disable_urgency_upon_qs(rdp);
2302 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2303 		/* ^^^ Released rnp->lock */
2304 		if (needwake)
2305 			rcu_gp_kthread_wake();
2306 	}
2307 }
2308 
2309 /*
2310  * Check to see if there is a new grace period of which this CPU
2311  * is not yet aware, and if so, set up local rcu_data state for it.
2312  * Otherwise, see if this CPU has just passed through its first
2313  * quiescent state for this grace period, and record that fact if so.
2314  */
2315 static void
2316 rcu_check_quiescent_state(struct rcu_data *rdp)
2317 {
2318 	/* Check for grace-period ends and beginnings. */
2319 	note_gp_changes(rdp);
2320 
2321 	/*
2322 	 * Does this CPU still need to do its part for current grace period?
2323 	 * If no, return and let the other CPUs do their part as well.
2324 	 */
2325 	if (!rdp->core_needs_qs)
2326 		return;
2327 
2328 	/*
2329 	 * Was there a quiescent state since the beginning of the grace
2330 	 * period? If no, then exit and wait for the next call.
2331 	 */
2332 	if (rdp->cpu_no_qs.b.norm)
2333 		return;
2334 
2335 	/*
2336 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2337 	 * judge of that).
2338 	 */
2339 	rcu_report_qs_rdp(rdp);
2340 }
2341 
2342 /*
2343  * Near the end of the offline process.  Trace the fact that this CPU
2344  * is going offline.
2345  */
2346 int rcutree_dying_cpu(unsigned int cpu)
2347 {
2348 	bool blkd;
2349 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2350 	struct rcu_node *rnp = rdp->mynode;
2351 
2352 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2353 		return 0;
2354 
2355 	blkd = !!(rnp->qsmask & rdp->grpmask);
2356 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2357 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2358 	return 0;
2359 }
2360 
2361 /*
2362  * All CPUs for the specified rcu_node structure have gone offline,
2363  * and all tasks that were preempted within an RCU read-side critical
2364  * section while running on one of those CPUs have since exited their RCU
2365  * read-side critical section.  Some other CPU is reporting this fact with
2366  * the specified rcu_node structure's ->lock held and interrupts disabled.
2367  * This function therefore goes up the tree of rcu_node structures,
2368  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2369  * the leaf rcu_node structure's ->qsmaskinit field has already been
2370  * updated.
2371  *
2372  * This function does check that the specified rcu_node structure has
2373  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2374  * prematurely.  That said, invoking it after the fact will cost you
2375  * a needless lock acquisition.  So once it has done its work, don't
2376  * invoke it again.
2377  */
2378 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2379 {
2380 	long mask;
2381 	struct rcu_node *rnp = rnp_leaf;
2382 
2383 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2384 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2385 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2386 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2387 		return;
2388 	for (;;) {
2389 		mask = rnp->grpmask;
2390 		rnp = rnp->parent;
2391 		if (!rnp)
2392 			break;
2393 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2394 		rnp->qsmaskinit &= ~mask;
2395 		/* Between grace periods, so better already be zero! */
2396 		WARN_ON_ONCE(rnp->qsmask);
2397 		if (rnp->qsmaskinit) {
2398 			raw_spin_unlock_rcu_node(rnp);
2399 			/* irqs remain disabled. */
2400 			return;
2401 		}
2402 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2403 	}
2404 }
2405 
2406 /*
2407  * The CPU has been completely removed, and some other CPU is reporting
2408  * this fact from process context.  Do the remainder of the cleanup.
2409  * There can only be one CPU hotplug operation at a time, so no need for
2410  * explicit locking.
2411  */
2412 int rcutree_dead_cpu(unsigned int cpu)
2413 {
2414 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2415 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2416 
2417 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2418 		return 0;
2419 
2420 	/* Adjust any no-longer-needed kthreads. */
2421 	rcu_boost_kthread_setaffinity(rnp, -1);
2422 	/* Do any needed no-CB deferred wakeups from this CPU. */
2423 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2424 
2425 	// Stop-machine done, so allow nohz_full to disable tick.
2426 	tick_dep_clear(TICK_DEP_BIT_RCU);
2427 	return 0;
2428 }
2429 
2430 /*
2431  * Invoke any RCU callbacks that have made it to the end of their grace
2432  * period.  Throttle as specified by rdp->blimit.
2433  */
2434 static void rcu_do_batch(struct rcu_data *rdp)
2435 {
2436 	int div;
2437 	unsigned long flags;
2438 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2439 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2440 	struct rcu_head *rhp;
2441 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2442 	long bl, count;
2443 	long pending, tlimit = 0;
2444 
2445 	/* If no callbacks are ready, just return. */
2446 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2447 		trace_rcu_batch_start(rcu_state.name,
2448 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2449 		trace_rcu_batch_end(rcu_state.name, 0,
2450 				    !rcu_segcblist_empty(&rdp->cblist),
2451 				    need_resched(), is_idle_task(current),
2452 				    rcu_is_callbacks_kthread());
2453 		return;
2454 	}
2455 
2456 	/*
2457 	 * Extract the list of ready callbacks, disabling to prevent
2458 	 * races with call_rcu() from interrupt handlers.  Leave the
2459 	 * callback counts, as rcu_barrier() needs to be conservative.
2460 	 */
2461 	local_irq_save(flags);
2462 	rcu_nocb_lock(rdp);
2463 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2464 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2465 	div = READ_ONCE(rcu_divisor);
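	/*
	 * Editorial annotation: treat a negative rcu_divisor as the default
	 * shift of 7, and clamp it to at most BITS_PER_LONG - 2 so that
	 * "pending >> div" below stays well-defined.
	 */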
2466 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2467 	bl = max(rdp->blimit, pending >> div);
2468 	if (in_serving_softirq() && unlikely(bl > 100)) {
2469 		long rrn = READ_ONCE(rcu_resched_ns);
2470 
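		/* Editorial annotation: clamp the resched interval to [1 ms, 1 s]. */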
2471 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2472 		tlimit = local_clock() + rrn;
2473 	}
2474 	trace_rcu_batch_start(rcu_state.name,
2475 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2476 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2477 	if (offloaded)
2478 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2479 	rcu_nocb_unlock_irqrestore(rdp, flags);
2480 
2481 	/* Invoke callbacks. */
2482 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2483 	rhp = rcu_cblist_dequeue(&rcl);
2484 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2485 		rcu_callback_t f;
2486 
2487 		debug_rcu_head_unqueue(rhp);
2488 
2489 		rcu_lock_acquire(&rcu_callback_map);
2490 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2491 
2492 		f = rhp->func;
2493 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2494 		f(rhp);
2495 
2496 		rcu_lock_release(&rcu_callback_map);
2497 
2498 		/*
2499 		 * Stop only if limit reached and CPU has something to do.
2500 		 * Note: The rcl structure counts down from zero.
2501 		 */
2502 		if (in_serving_softirq()) {
2503 			if (-rcl.len >= bl && (need_resched() ||
2504 					(!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2505 				break;
2506 
2507 			/*
2508 			 * Make sure we don't spend too much time here and deprive other
2509 			 * softirq vectors of CPU cycles.
2510 			 */
2511 			if (unlikely(tlimit)) {
2512 				/* only call local_clock() every 32 callbacks */
2513 				if (likely((-rcl.len & 31) || local_clock() < tlimit))
2514 					continue;
2515 				/* Exceeded the time limit, so leave. */
2516 				break;
2517 			}
2518 		} else {
2519 			local_bh_enable();
2520 			lockdep_assert_irqs_enabled();
2521 			cond_resched_tasks_rcu_qs();
2522 			lockdep_assert_irqs_enabled();
2523 			local_bh_disable();
2524 		}
2525 	}
2526 
2527 	local_irq_save(flags);
2528 	rcu_nocb_lock(rdp);
2529 	count = -rcl.len;
2530 	rdp->n_cbs_invoked += count;
2531 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2532 			    is_idle_task(current), rcu_is_callbacks_kthread());
2533 
2534 	/* Update counts and requeue any remaining callbacks. */
2535 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2536 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2537 	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2538 
2539 	/* Reinstate batch limit if we have worked down the excess. */
2540 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2541 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2542 		rdp->blimit = blimit;
2543 
2544 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2545 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2546 		rdp->qlen_last_fqs_check = 0;
2547 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2548 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2549 		rdp->qlen_last_fqs_check = count;
2550 
2551 	/*
2552 	 * The following usually indicates a double call_rcu().  To track
2553 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2554 	 */
2555 	WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2556 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2557 		     count != 0 && rcu_segcblist_empty(&rdp->cblist));
2558 
2559 	rcu_nocb_unlock_irqrestore(rdp, flags);
2560 
2561 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2562 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2563 		invoke_rcu_core();
2564 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2565 }
2566 
2567 /*
2568  * This function is invoked from each scheduling-clock interrupt,
2569  * and checks to see if this CPU is in a non-context-switch quiescent
2570  * state, for example, user mode or idle loop.  It also schedules RCU
2571  * core processing.  If the current grace period has gone on too long,
2572  * it will ask the scheduler to manufacture a context switch for the sole
2573  * purpose of providing the needed quiescent state.
2574  */
2575 void rcu_sched_clock_irq(int user)
2576 {
2577 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2578 	lockdep_assert_irqs_disabled();
2579 	raw_cpu_inc(rcu_data.ticks_this_gp);
2580 	/* The load-acquire pairs with the store-release setting to true. */
2581 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2582 		/* Idle and userspace execution already are quiescent states. */
2583 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2584 			set_tsk_need_resched(current);
2585 			set_preempt_need_resched();
2586 		}
2587 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2588 	}
2589 	rcu_flavor_sched_clock_irq(user);
2590 	if (rcu_pending(user))
2591 		invoke_rcu_core();
2592 	lockdep_assert_irqs_disabled();
2593 
2594 	trace_rcu_utilization(TPS("End scheduler-tick"));
2595 }
2596 
2597 /*
2598  * Scan the leaf rcu_node structures.  For each structure on which all
2599  * CPUs have reported a quiescent state and on which there are tasks
2600  * blocking the current grace period, initiate RCU priority boosting.
2601  * Otherwise, invoke the specified function to check dyntick state for
2602  * each CPU that has not yet reported a quiescent state.
2603  */
2604 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2605 {
2606 	int cpu;
2607 	unsigned long flags;
2608 	unsigned long mask;
2609 	struct rcu_data *rdp;
2610 	struct rcu_node *rnp;
2611 
2612 	rcu_state.cbovld = rcu_state.cbovldnext;
2613 	rcu_state.cbovldnext = false;
2614 	rcu_for_each_leaf_node(rnp) {
2615 		cond_resched_tasks_rcu_qs();
2616 		mask = 0;
2617 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2618 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2619 		if (rnp->qsmask == 0) {
2620 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2621 				/*
2622 				 * No point in scanning bits because they
2623 				 * are all zero.  But we might need to
2624 				 * priority-boost blocked readers.
2625 				 */
2626 				rcu_initiate_boost(rnp, flags);
2627 				/* rcu_initiate_boost() releases rnp->lock */
2628 				continue;
2629 			}
2630 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2631 			continue;
2632 		}
2633 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2634 			rdp = per_cpu_ptr(&rcu_data, cpu);
2635 			if (f(rdp)) {
2636 				mask |= rdp->grpmask;
2637 				rcu_disable_urgency_upon_qs(rdp);
2638 			}
2639 		}
2640 		if (mask != 0) {
2641 			/* Idle/offline CPUs, report (releases rnp->lock). */
2642 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2643 		} else {
2644 			/* Nothing to do here, so just drop the lock. */
2645 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2646 		}
2647 	}
2648 }
2649 
2650 /*
2651  * Force quiescent states on reluctant CPUs, and also detect which
2652  * CPUs are in dyntick-idle mode.
2653  */
2654 void rcu_force_quiescent_state(void)
2655 {
2656 	unsigned long flags;
2657 	bool ret;
2658 	struct rcu_node *rnp;
2659 	struct rcu_node *rnp_old = NULL;
2660 
2661 	/* Funnel through hierarchy to reduce memory contention. */
2662 	rnp = raw_cpu_read(rcu_data.mynode);
2663 	for (; rnp != NULL; rnp = rnp->parent) {
2664 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2665 		       !raw_spin_trylock(&rnp->fqslock);
2666 		if (rnp_old != NULL)
2667 			raw_spin_unlock(&rnp_old->fqslock);
2668 		if (ret)
2669 			return;
2670 		rnp_old = rnp;
2671 	}
2672 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2673 
2674 	/* Reached the root of the rcu_node tree, acquire lock. */
2675 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2676 	raw_spin_unlock(&rnp_old->fqslock);
2677 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2678 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2679 		return;  /* Someone beat us to it. */
2680 	}
2681 	WRITE_ONCE(rcu_state.gp_flags,
2682 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2683 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2684 	rcu_gp_kthread_wake();
2685 }
2686 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2687 
2688 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2689 // grace periods.
2690 static void strict_work_handler(struct work_struct *work)
2691 {
2692 	rcu_read_lock();
2693 	rcu_read_unlock();
2694 }
2695 
2696 /* Perform RCU core processing work for the current CPU.  */
2697 static __latent_entropy void rcu_core(void)
2698 {
2699 	unsigned long flags;
2700 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2701 	struct rcu_node *rnp = rdp->mynode;
2702 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2703 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2704 
2705 	if (cpu_is_offline(smp_processor_id()))
2706 		return;
2707 	trace_rcu_utilization(TPS("Start RCU core"));
2708 	WARN_ON_ONCE(!rdp->beenonline);
2709 
2710 	/* Report any deferred quiescent states if preemption enabled. */
2711 	if (!(preempt_count() & PREEMPT_MASK)) {
2712 		rcu_preempt_deferred_qs(current);
2713 	} else if (rcu_preempt_need_deferred_qs(current)) {
2714 		set_tsk_need_resched(current);
2715 		set_preempt_need_resched();
2716 	}
2717 
2718 	/* Update RCU state based on any recent quiescent states. */
2719 	rcu_check_quiescent_state(rdp);
2720 
2721 	/* No grace period and unregistered callbacks? */
2722 	if (!rcu_gp_in_progress() &&
2723 	    rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2724 		local_irq_save(flags);
2725 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2726 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2727 		local_irq_restore(flags);
2728 	}
2729 
2730 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2731 
2732 	/* If there are callbacks ready, invoke them. */
2733 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2734 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2735 		rcu_do_batch(rdp);
2736 
2737 	/* Do any needed deferred wakeups of rcuo kthreads. */
2738 	do_nocb_deferred_wakeup(rdp);
2739 	trace_rcu_utilization(TPS("End RCU core"));
2740 
2741 	// If strict GPs, schedule an RCU reader in a clean environment.
2742 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2743 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2744 }
2745 
2746 static void rcu_core_si(struct softirq_action *h)
2747 {
2748 	rcu_core();
2749 }
2750 
2751 static void rcu_wake_cond(struct task_struct *t, int status)
2752 {
2753 	/*
2754 	 * If the thread is yielding, only wake it when this
2755 	 * is invoked from idle.
2756 	 */
2757 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2758 		wake_up_process(t);
2759 }
2760 
2761 static void invoke_rcu_core_kthread(void)
2762 {
2763 	struct task_struct *t;
2764 	unsigned long flags;
2765 
2766 	local_irq_save(flags);
2767 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2768 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2769 	if (t != NULL && t != current)
2770 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2771 	local_irq_restore(flags);
2772 }
2773 
2774 /*
2775  * Wake up this CPU's rcuc kthread to do RCU core processing.
2776  */
2777 static void invoke_rcu_core(void)
2778 {
2779 	if (!cpu_online(smp_processor_id()))
2780 		return;
2781 	if (use_softirq)
2782 		raise_softirq(RCU_SOFTIRQ);
2783 	else
2784 		invoke_rcu_core_kthread();
2785 }
2786 
2787 static void rcu_cpu_kthread_park(unsigned int cpu)
2788 {
2789 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2790 }
2791 
2792 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2793 {
2794 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2795 }
2796 
2797 /*
2798  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2799  * the RCU softirq used in configurations of RCU that do not support RCU
2800  * priority boosting.
2801  */
2802 static void rcu_cpu_kthread(unsigned int cpu)
2803 {
2804 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2805 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2806 	int spincnt;
2807 
2808 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2809 	for (spincnt = 0; spincnt < 10; spincnt++) {
2810 		local_bh_disable();
2811 		*statusp = RCU_KTHREAD_RUNNING;
2812 		local_irq_disable();
2813 		work = *workp;
2814 		*workp = 0;
2815 		local_irq_enable();
2816 		if (work)
2817 			rcu_core();
2818 		local_bh_enable();
2819 		if (*workp == 0) {
2820 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2821 			*statusp = RCU_KTHREAD_WAITING;
2822 			return;
2823 		}
2824 	}
2825 	*statusp = RCU_KTHREAD_YIELDING;
2826 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2827 	schedule_timeout_idle(2);
2828 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2829 	*statusp = RCU_KTHREAD_WAITING;
2830 }
2831 
2832 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2833 	.store			= &rcu_data.rcu_cpu_kthread_task,
2834 	.thread_should_run	= rcu_cpu_kthread_should_run,
2835 	.thread_fn		= rcu_cpu_kthread,
2836 	.thread_comm		= "rcuc/%u",
2837 	.setup			= rcu_cpu_kthread_setup,
2838 	.park			= rcu_cpu_kthread_park,
2839 };
2840 
2841 /*
2842  * Spawn per-CPU RCU core processing kthreads.
2843  */
2844 static int __init rcu_spawn_core_kthreads(void)
2845 {
2846 	int cpu;
2847 
2848 	for_each_possible_cpu(cpu)
2849 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2850 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2851 		return 0;
2852 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2853 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2854 	return 0;
2855 }
2856 
2857 /*
2858  * Handle any core-RCU processing required by a call_rcu() invocation.
2859  */
2860 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2861 			    unsigned long flags)
2862 {
2863 	/*
2864 	 * If called from an extended quiescent state, invoke the RCU
2865 	 * core in order to force a re-evaluation of RCU's idleness.
2866 	 */
2867 	if (!rcu_is_watching())
2868 		invoke_rcu_core();
2869 
2870 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2871 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2872 		return;
2873 
2874 	/*
2875 	 * Force the grace period if too many callbacks or too long waiting.
2876 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2877 	 * if some other CPU has recently done so.  Also, don't bother
2878 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2879 	 * is the only one waiting for a grace period to complete.
2880 	 */
2881 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2882 		     rdp->qlen_last_fqs_check + qhimark)) {
2883 
2884 		/* Are we ignoring a completed grace period? */
2885 		note_gp_changes(rdp);
2886 
2887 		/* Start a new grace period if one not already started. */
2888 		if (!rcu_gp_in_progress()) {
2889 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2890 		} else {
2891 			/* Give the grace period a kick. */
2892 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2893 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2894 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2895 				rcu_force_quiescent_state();
2896 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2897 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2898 		}
2899 	}
2900 }
2901 
2902 /*
2903  * RCU callback function to leak a callback.
2904  */
2905 static void rcu_leak_callback(struct rcu_head *rhp)
2906 {
2907 }
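/*
 * Editorial annotation: this callback is deliberately empty.  It is
 * installed in place of the real callback when debug_rcu_head_queue()
 * in __call_rcu() below detects a probable double call_rcu(), so the
 * memory is intentionally leaked rather than double-freed.
 */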
2908 
2909 /*
2910  * Check and if necessary update the leaf rcu_node structure's
2911  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2912  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2913  * structure's ->lock.
2914  */
2915 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2916 {
2917 	raw_lockdep_assert_held_rcu_node(rnp);
2918 	if (qovld_calc <= 0)
2919 		return; // Early boot and wildcard value set.
2920 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2921 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2922 	else
2923 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2924 }
2925 
2926 /*
2927  * Check and if necessary update the leaf rcu_node structure's
2928  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2929  * number of queued RCU callbacks.  No locks need be held, but the
2930  * caller must have disabled interrupts.
2931  *
2932  * Note that this function ignores the possibility that there are a lot
2933  * of callbacks all of which have already seen the end of their respective
2934  * grace periods.  This omission is due to the need for no-CBs CPUs to
2935  * be holding ->nocb_lock to do this check, which is too heavy for a
2936  * common-case operation.
2937  */
2938 static void check_cb_ovld(struct rcu_data *rdp)
2939 {
2940 	struct rcu_node *const rnp = rdp->mynode;
2941 
2942 	if (qovld_calc <= 0 ||
2943 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2944 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2945 		return; // Early boot wildcard value or already set correctly.
2946 	raw_spin_lock_rcu_node(rnp);
2947 	check_cb_ovld_locked(rdp, rnp);
2948 	raw_spin_unlock_rcu_node(rnp);
2949 }
2950 
2951 /* Helper function for call_rcu() and friends.  */
2952 static void
2953 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2954 {
2955 	unsigned long flags;
2956 	struct rcu_data *rdp;
2957 	bool was_alldone;
2958 
2959 	/* Misaligned rcu_head! */
2960 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2961 
2962 	if (debug_rcu_head_queue(head)) {
2963 		/*
2964 		 * Probable double call_rcu(), so leak the callback.
2965 		 * Use rcu:rcu_callback trace event to find the previous
2966 		 * time callback was passed to __call_rcu().
2967 		 */
2968 		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2969 			  head, head->func);
2970 		WRITE_ONCE(head->func, rcu_leak_callback);
2971 		return;
2972 	}
2973 	head->func = func;
2974 	head->next = NULL;
2975 	local_irq_save(flags);
2976 	kasan_record_aux_stack(head);
2977 	rdp = this_cpu_ptr(&rcu_data);
2978 
2979 	/* Add the callback to our list. */
2980 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2981 		// This can trigger due to call_rcu() from offline CPU:
2982 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2983 		WARN_ON_ONCE(!rcu_is_watching());
2984 		// Very early boot, before rcu_init().  Initialize if needed
2985 		// and then drop through to queue the callback.
2986 		if (rcu_segcblist_empty(&rdp->cblist))
2987 			rcu_segcblist_init(&rdp->cblist);
2988 	}
2989 
2990 	check_cb_ovld(rdp);
2991 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2992 		return; // Enqueued onto ->nocb_bypass, so just leave.
2993 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2994 	rcu_segcblist_enqueue(&rdp->cblist, head);
2995 	if (__is_kvfree_rcu_offset((unsigned long)func))
2996 		trace_rcu_kvfree_callback(rcu_state.name, head,
2997 					 (unsigned long)func,
2998 					 rcu_segcblist_n_cbs(&rdp->cblist));
2999 	else
3000 		trace_rcu_callback(rcu_state.name, head,
3001 				   rcu_segcblist_n_cbs(&rdp->cblist));
3002 
3003 	/* Go handle any RCU core processing required. */
3004 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
3005 	    unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
3006 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3007 	} else {
3008 		__call_rcu_core(rdp, head, flags);
3009 		local_irq_restore(flags);
3010 	}
3011 }
3012 
3013 /**
3014  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3015  * @head: structure to be used for queueing the RCU updates.
3016  * @func: actual callback function to be invoked after the grace period
3017  *
3018  * The callback function will be invoked some time after a full grace
3019  * period elapses, in other words after all pre-existing RCU read-side
3020  * critical sections have completed.  However, the callback function
3021  * might well execute concurrently with RCU read-side critical sections
3022  * that started after call_rcu() was invoked.  RCU read-side critical
3023  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3024  * may be nested.  In addition, regions of code across which interrupts,
3025  * preemption, or softirqs have been disabled also serve as RCU read-side
3026  * critical sections.  This includes hardware interrupt handlers, softirq
3027  * handlers, and NMI handlers.
3028  *
3029  * Note that all CPUs must agree that the grace period extended beyond
3030  * all pre-existing RCU read-side critical sections.  On systems with more
3031  * than one CPU, this means that when "func()" is invoked, each CPU is
3032  * guaranteed to have executed a full memory barrier since the end of its
3033  * last RCU read-side critical section whose beginning preceded the call
3034  * to call_rcu().  It also means that each CPU executing an RCU read-side
3035  * critical section that continues beyond the start of "func()" must have
3036  * executed a memory barrier after the call_rcu() but before the beginning
3037  * of that RCU read-side critical section.  Note that these guarantees
3038  * include CPUs that are offline, idle, or executing in user mode, as
3039  * well as CPUs that are executing in the kernel.
3040  *
3041  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3042  * resulting RCU callback function "func()", then both CPU A and CPU B are
3043  * guaranteed to execute a full memory barrier during the time interval
3044  * between the call to call_rcu() and the invocation of "func()" -- even
3045  * if CPU A and CPU B are the same CPU (but again only if the system has
3046  * more than one CPU).
3047  */
3048 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3049 {
3050 	__call_rcu(head, func);
3051 }
3052 EXPORT_SYMBOL_GPL(call_rcu);
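/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * typically embeds an rcu_head in its own structure and frees that
 * structure from the callback.  "struct foo", foo_reclaim(), and
 * foo_release() are hypothetical names, not kernel APIs.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);	// All pre-existing readers are now done.
 *	}
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		call_rcu(&fp->rcu, foo_reclaim);	// Does not block.
 *	}
 */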
3053 
3054 
3055 /* Maximum number of jiffies to wait before draining a batch. */
3056 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3057 #define KFREE_N_BATCHES 2
3058 #define FREE_N_CHANNELS 2
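/*
 * Reading aid: HZ jiffies make up one second, so HZ/50 is 20 milliseconds
 * regardless of the HZ setting; KFREE_N_BATCHES permits two batches to be
 * in flight per CPU; the two channels separate kmalloc()'d pointers from
 * vmalloc()'d ones.
 */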
3059 
3060 /**
3061  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3062  * @nr_records: Number of active pointers in the array
3063  * @next: Next bulk object in the block chain
3064  * @records: Array of the kvfree_rcu() pointers
3065  */
3066 struct kvfree_rcu_bulk_data {
3067 	unsigned long nr_records;
3068 	struct kvfree_rcu_bulk_data *next;
3069 	void *records[];
3070 };
3071 
3072 /*
3073  * This macro defines how many entries the "records" array
3074  * will contain. It is sized so that a kvfree_rcu_bulk_data
3075  * structure plus its records array occupies exactly one page.
3076  */
3077 #define KVFREE_BULK_MAX_ENTR \
3078 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
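/*
 * Worked example (assuming a 64-bit kernel with 4 KiB pages): the header
 * fields occupy 16 bytes (one unsigned long plus one pointer), so the
 * macro yields (4096 - 16) / 8 = 510 record slots per page-sized block.
 */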
3079 
3080 /**
3081  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3082  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3083  * @head_free: List of kfree_rcu() objects waiting for a grace period
3084  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3085  * @krcp: Pointer to @kfree_rcu_cpu structure
3086  */
3087 
3088 struct kfree_rcu_cpu_work {
3089 	struct rcu_work rcu_work;
3090 	struct rcu_head *head_free;
3091 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3092 	struct kfree_rcu_cpu *krcp;
3093 };
3094 
3095 /**
3096  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3097  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3098  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3099  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3100  * @lock: Synchronize access to this structure
3101  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3102  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3103  * @initialized: The @rcu_work fields have been initialized
3104  * @count: Number of objects for which GP not started
3105  * @bkvcache:
3106  *	A simple cache list that contains objects for reuse.
3107  *	In order to save some per-cpu space the list is singular.
3108  *	Even though the list itself is lockless, accesses to it
3109  *	must be protected by the per-cpu lock.
3110  * @page_cache_work: A work to refill the cache when it is empty
3111  * @work_in_progress: Indicates that page_cache_work is running
3112  * @hrtimer: A hrtimer for scheduling a page_cache_work
3113  * @nr_bkv_objs: number of allocated objects in @bkvcache.
3114  *
3115  * This is a per-CPU structure.  The reason that it is not included in
3116  * the rcu_data structure is to permit this code to be extracted from
3117  * the RCU files.  Such extraction could allow further optimization of
3118  * the interactions with the slab allocators.
3119  */
3120 struct kfree_rcu_cpu {
3121 	struct rcu_head *head;
3122 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3123 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3124 	raw_spinlock_t lock;
3125 	struct delayed_work monitor_work;
3126 	bool monitor_todo;
3127 	bool initialized;
3128 	int count;
3129 
3130 	struct work_struct page_cache_work;
3131 	atomic_t work_in_progress;
3132 	struct hrtimer hrtimer;
3133 
3134 	struct llist_head bkvcache;
3135 	int nr_bkv_objs;
3136 };
3137 
3138 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3139 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3140 };
3141 
3142 static __always_inline void
3143 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3144 {
3145 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3146 	int i;
3147 
3148 	for (i = 0; i < bhead->nr_records; i++)
3149 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3150 #endif
3151 }
3152 
3153 static inline struct kfree_rcu_cpu *
3154 krc_this_cpu_lock(unsigned long *flags)
3155 {
3156 	struct kfree_rcu_cpu *krcp;
3157 
3158 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3159 	krcp = this_cpu_ptr(&krc);
3160 	raw_spin_lock(&krcp->lock);
3161 
3162 	return krcp;
3163 }
3164 
3165 static inline void
3166 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3167 {
3168 	raw_spin_unlock(&krcp->lock);
3169 	local_irq_restore(flags);
3170 }
3171 
3172 static inline struct kvfree_rcu_bulk_data *
3173 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3174 {
3175 	if (!krcp->nr_bkv_objs)
3176 		return NULL;
3177 
3178 	krcp->nr_bkv_objs--;
3179 	return (struct kvfree_rcu_bulk_data *)
3180 		llist_del_first(&krcp->bkvcache);
3181 }
3182 
3183 static inline bool
3184 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3185 	struct kvfree_rcu_bulk_data *bnode)
3186 {
3187 	// Check the limit.
3188 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3189 		return false;
3190 
3191 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3192 	krcp->nr_bkv_objs++;
3193 	return true;
3194 
3195 }
3196 
3197 /*
3198  * This function is invoked in workqueue context after a grace period.
3199  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3200  */
3201 static void kfree_rcu_work(struct work_struct *work)
3202 {
3203 	unsigned long flags;
3204 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3205 	struct rcu_head *head, *next;
3206 	struct kfree_rcu_cpu *krcp;
3207 	struct kfree_rcu_cpu_work *krwp;
3208 	int i, j;
3209 
3210 	krwp = container_of(to_rcu_work(work),
3211 			    struct kfree_rcu_cpu_work, rcu_work);
3212 	krcp = krwp->krcp;
3213 
3214 	raw_spin_lock_irqsave(&krcp->lock, flags);
3215 	// Channels 1 and 2.
3216 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3217 		bkvhead[i] = krwp->bkvhead_free[i];
3218 		krwp->bkvhead_free[i] = NULL;
3219 	}
3220 
3221 	// Channel 3.
3222 	head = krwp->head_free;
3223 	krwp->head_free = NULL;
3224 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3225 
3226 	// Handle the first two channels.
3227 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3228 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3229 			bnext = bkvhead[i]->next;
3230 			debug_rcu_bhead_unqueue(bkvhead[i]);
3231 
3232 			rcu_lock_acquire(&rcu_callback_map);
3233 			if (i == 0) { // kmalloc() / kfree().
3234 				trace_rcu_invoke_kfree_bulk_callback(
3235 					rcu_state.name, bkvhead[i]->nr_records,
3236 					bkvhead[i]->records);
3237 
3238 				kfree_bulk(bkvhead[i]->nr_records,
3239 					bkvhead[i]->records);
3240 			} else { // vmalloc() / vfree().
3241 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3242 					trace_rcu_invoke_kvfree_callback(
3243 						rcu_state.name,
3244 						bkvhead[i]->records[j], 0);
3245 
3246 					vfree(bkvhead[i]->records[j]);
3247 				}
3248 			}
3249 			rcu_lock_release(&rcu_callback_map);
3250 
3251 			raw_spin_lock_irqsave(&krcp->lock, flags);
3252 			if (put_cached_bnode(krcp, bkvhead[i]))
3253 				bkvhead[i] = NULL;
3254 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3255 
3256 			if (bkvhead[i])
3257 				free_page((unsigned long) bkvhead[i]);
3258 
3259 			cond_resched_tasks_rcu_qs();
3260 		}
3261 	}
3262 
3263 	/*
3264 	 * Emergency case only. It can happen under low-memory
3265 	 * conditions when an allocation fails, so the "bulk"
3266 	 * path cannot be maintained for the time being.
3267 	 */
3268 	for (; head; head = next) {
3269 		unsigned long offset = (unsigned long)head->func;
3270 		void *ptr = (void *)head - offset;
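		/*
		 * For kvfree_rcu(p, field), ->func holds not a function
		 * pointer but the byte offset of the rcu_head within the
		 * enclosing object (see __is_kvfree_rcu_offset()), so
		 * subtracting that offset recovers the original pointer.
		 */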
3271 
3272 		next = head->next;
3273 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3274 		rcu_lock_acquire(&rcu_callback_map);
3275 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3276 
3277 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3278 			kvfree(ptr);
3279 
3280 		rcu_lock_release(&rcu_callback_map);
3281 		cond_resched_tasks_rcu_qs();
3282 	}
3283 }
3284 
3285 static bool
3286 need_offload_krc(struct kfree_rcu_cpu *krcp)
3287 {
3288 	int i;
3289 
3290 	for (i = 0; i < FREE_N_CHANNELS; i++)
3291 		if (krcp->bkvhead[i])
3292 			return true;
3293 
3294 	return !!krcp->head;
3295 }
3296 
3297 static bool
3298 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3299 {
3300 	int i;
3301 
3302 	for (i = 0; i < FREE_N_CHANNELS; i++)
3303 		if (krwp->bkvhead_free[i])
3304 			return true;
3305 
3306 	return !!krwp->head_free;
3307 }
3308 
3309 /*
3310  * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3311  *
3312  * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3313  * timeout has been reached.
3314  */
3315 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3316 {
3317 	struct kfree_rcu_cpu_work *krwp;
3318 	bool repeat = false;
3319 	int i, j;
3320 
3321 	lockdep_assert_held(&krcp->lock);
3322 
3323 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3324 		krwp = &(krcp->krw_arr[i]);
3325 
3326 		// Try to detach bulk_head or head and attach it, but only when
3327 		// all channels are free.  If any channel is not free, there is
3328 		// on-going RCU work at krwp handling its free business.
3329 		if (need_wait_for_krwp_work(krwp))
3330 			continue;
3331 
3332 		if (need_offload_krc(krcp)) {
3333 			// Channel 1 corresponds to SLAB ptrs.
3334 			// Channel 2 corresponds to vmalloc ptrs.
3335 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3336 				if (!krwp->bkvhead_free[j]) {
3337 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3338 					krcp->bkvhead[j] = NULL;
3339 				}
3340 			}
3341 
3342 			// Channel 3 corresponds to emergency path.
3343 			if (!krwp->head_free) {
3344 				krwp->head_free = krcp->head;
3345 				krcp->head = NULL;
3346 			}
3347 
3348 			WRITE_ONCE(krcp->count, 0);
3349 
3350 			/*
3351 			 * There is one work item per batch, so each
3352 			 * batch can handle three "free channels". The
3353 			 * work may already be in the pending state when
3354 			 * the channels have been detached one after
3355 			 * the other.
3356 			 */
3357 			queue_rcu_work(system_wq, &krwp->rcu_work);
3358 		}
3359 	}
3360 
3361 	// Repeat if any corresponding "free" channel is still busy.
3362 	if (need_offload_krc(krcp))
3363 		repeat = true;
3364 
3365 	return !repeat;
3366 }
3367 
3368 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3369 					  unsigned long flags)
3370 {
3371 	// Attempt to start a new batch.
3372 	krcp->monitor_todo = false;
3373 	if (queue_kfree_rcu_work(krcp)) {
3374 		// Success! Our job is done here.
3375 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3376 		return;
3377 	}
3378 
3379 	// Previous RCU batch still in progress, try again later.
3380 	krcp->monitor_todo = true;
3381 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3382 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3383 }
3384 
3385 /*
3386  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3387  * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3388  */
3389 static void kfree_rcu_monitor(struct work_struct *work)
3390 {
3391 	unsigned long flags;
3392 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3393 						 monitor_work.work);
3394 
3395 	raw_spin_lock_irqsave(&krcp->lock, flags);
3396 	if (krcp->monitor_todo)
3397 		kfree_rcu_drain_unlock(krcp, flags);
3398 	else
3399 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3400 }
3401 
3402 static enum hrtimer_restart
3403 schedule_page_work_fn(struct hrtimer *t)
3404 {
3405 	struct kfree_rcu_cpu *krcp =
3406 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3407 
3408 	queue_work(system_highpri_wq, &krcp->page_cache_work);
3409 	return HRTIMER_NORESTART;
3410 }
3411 
3412 static void fill_page_cache_func(struct work_struct *work)
3413 {
3414 	struct kvfree_rcu_bulk_data *bnode;
3415 	struct kfree_rcu_cpu *krcp =
3416 		container_of(work, struct kfree_rcu_cpu,
3417 			page_cache_work);
3418 	unsigned long flags;
3419 	bool pushed;
3420 	int i;
3421 
3422 	for (i = 0; i < rcu_min_cached_objs; i++) {
3423 		bnode = (struct kvfree_rcu_bulk_data *)
3424 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3425 
3426 		if (!bnode)
3427 			break;
3428 
3429 		raw_spin_lock_irqsave(&krcp->lock, flags);
3430 		pushed = put_cached_bnode(krcp, bnode);
3431 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3432 
3433 		if (!pushed) {
3434 			free_page((unsigned long) bnode);
3435 			break;
3436 		}
3437 	}
3438 
3439 	atomic_set(&krcp->work_in_progress, 0);
3440 }
3441 
3442 static void
3443 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3444 {
3445 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3446 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3447 		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
3448 			HRTIMER_MODE_REL);
3449 		krcp->hrtimer.function = schedule_page_work_fn;
3450 		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3451 	}
3452 }
3453 
3454 static inline bool
3455 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3456 {
3457 	struct kvfree_rcu_bulk_data *bnode;
3458 	int idx;
3459 
3460 	if (unlikely(!krcp->initialized))
3461 		return false;
3462 
3463 	lockdep_assert_held(&krcp->lock);
3464 	idx = !!is_vmalloc_addr(ptr);
3465 
3466 	/* Check if a new block is required. */
3467 	if (!krcp->bkvhead[idx] ||
3468 			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3469 		bnode = get_cached_bnode(krcp);
3470 		/* Switch to emergency path. */
3471 		if (!bnode)
3472 			return false;
3473 
3474 		/* Initialize the new block. */
3475 		bnode->nr_records = 0;
3476 		bnode->next = krcp->bkvhead[idx];
3477 
3478 		/* Attach it to the head. */
3479 		krcp->bkvhead[idx] = bnode;
3480 	}
3481 
3482 	/* Finally insert. */
3483 	krcp->bkvhead[idx]->records
3484 		[krcp->bkvhead[idx]->nr_records++] = ptr;
3485 
3486 	return true;
3487 }
3488 
3489 /*
3490  * Queue a request for lazy invocation of the appropriate free routine
3491  * after a grace period. Please note that three paths are maintained: two
3492  * main ones that use the array-of-pointers interface, and a third,
3493  * emergency one that is used only when a main path cannot be maintained
3494  * for the time being due to memory pressure.
3495  *
3496  * Each kvfree_call_rcu() request is added to a batch. The batch is drained
3497  * every KFREE_DRAIN_JIFFIES jiffies, and all objects in it are then freed
3498  * in workqueue context. Batching requests together reduces the number of
3499  * grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3500  */
3501 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3502 {
3503 	unsigned long flags;
3504 	struct kfree_rcu_cpu *krcp;
3505 	bool success;
3506 	void *ptr;
3507 
3508 	if (head) {
3509 		ptr = (void *) head - (unsigned long) func;
3510 	} else {
3511 		/*
3512 		 * Please note that the head-less variant has a
3513 		 * limitation, hence the clear rule for such objects:
3514 		 * it may be used only from might_sleep() contexts.
3515 		 * Everywhere else, please embed an rcu_head in your
3516 		 * data instead.
3517 		 */
3518 		might_sleep();
3519 		ptr = (unsigned long *) func;
3520 	}
3521 
3522 	krcp = krc_this_cpu_lock(&flags);
3523 
3524 	// Queue the object but don't yet schedule the batch.
3525 	if (debug_rcu_head_queue(ptr)) {
3526 		// Probable double kfree_rcu(), just leak.
3527 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3528 			  __func__, head);
3529 
3530 		// Mark as success and leave.
3531 		success = true;
3532 		goto unlock_return;
3533 	}
3534 
3535 	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3536 	if (!success) {
3537 		run_page_cache_worker(krcp);
3538 
3539 		if (head == NULL)
3540 			// Inline if kvfree_rcu(one_arg) call.
3541 			goto unlock_return;
3542 
3543 		head->func = func;
3544 		head->next = krcp->head;
3545 		krcp->head = head;
3546 		success = true;
3547 	}
3548 
3549 	WRITE_ONCE(krcp->count, krcp->count + 1);
3550 
3551 	/*
3552 	 * The kvfree_rcu() caller considers the pointer freed at this point
3553 	 * and likely removes any references to it. Since the actual slab
3554 	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3555 	 * this object (no scanning or false positives reporting).
3556 	 */
3557 	kmemleak_ignore(ptr);
3558 
3559 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3560 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3561 	    !krcp->monitor_todo) {
3562 		krcp->monitor_todo = true;
3563 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3564 	}
3565 
3566 unlock_return:
3567 	krc_this_cpu_unlock(krcp, flags);
3568 
3569 	/*
3570 	 * Inline kvfree() after synchronize_rcu(). We can do
3571 	 * it only from a might_sleep() context, so that the
3572 	 * current CPU can pass through a quiescent state.
3573 	 */
3574 	if (!success) {
3575 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3576 		synchronize_rcu();
3577 		kvfree(ptr);
3578 	}
3579 }
3580 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
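/*
 * Usage sketch (illustrative; "struct foo", fp, and buf are hypothetical):
 * callers normally reach kvfree_call_rcu() through the kfree_rcu() and
 * kvfree_rcu() wrapper macros rather than invoking it directly.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(fp, rcu);	// Double-argument form, any context.
 *
 *	kvfree_rcu(buf);	// Head-less form, might_sleep() contexts
 *				// only, as described above.
 */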
3581 
3582 static unsigned long
3583 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3584 {
3585 	int cpu;
3586 	unsigned long count = 0;
3587 
3588 	/* Snapshot count of all CPUs */
3589 	for_each_possible_cpu(cpu) {
3590 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3591 
3592 		count += READ_ONCE(krcp->count);
3593 	}
3594 
3595 	return count;
3596 }
3597 
3598 static unsigned long
3599 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3600 {
3601 	int cpu, freed = 0;
3602 	unsigned long flags;
3603 
3604 	for_each_possible_cpu(cpu) {
3605 		int count;
3606 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3607 
3608 		count = krcp->count;
3609 		raw_spin_lock_irqsave(&krcp->lock, flags);
3610 		if (krcp->monitor_todo)
3611 			kfree_rcu_drain_unlock(krcp, flags);
3612 		else
3613 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3614 
3615 		sc->nr_to_scan -= count;
3616 		freed += count;
3617 
3618 		if (sc->nr_to_scan <= 0)
3619 			break;
3620 	}
3621 
3622 	return freed == 0 ? SHRINK_STOP : freed;
3623 }
3624 
3625 static struct shrinker kfree_rcu_shrinker = {
3626 	.count_objects = kfree_rcu_shrink_count,
3627 	.scan_objects = kfree_rcu_shrink_scan,
3628 	.batch = 0,
3629 	.seeks = DEFAULT_SEEKS,
3630 };
3631 
3632 void __init kfree_rcu_scheduler_running(void)
3633 {
3634 	int cpu;
3635 	unsigned long flags;
3636 
3637 	for_each_possible_cpu(cpu) {
3638 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3639 
3640 		raw_spin_lock_irqsave(&krcp->lock, flags);
3641 		if (!krcp->head || krcp->monitor_todo) {
3642 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3643 			continue;
3644 		}
3645 		krcp->monitor_todo = true;
3646 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3647 					 KFREE_DRAIN_JIFFIES);
3648 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3649 	}
3650 }
3651 
3652 /*
3653  * During early boot, any blocking grace-period wait automatically
3654  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3655  *
3656  * However, because a context switch is a grace period for !PREEMPTION, any
3657  * blocking grace-period wait automatically implies a grace period if
3658  * there is only one CPU online at any point in time during execution of
3659  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3660  * occasionally incorrectly indicate that there are multiple CPUs online
3661  * when there was in fact only one the whole time, as this just adds some
3662  * overhead: RCU still operates correctly.
3663  */
3664 static int rcu_blocking_is_gp(void)
3665 {
3666 	int ret;
3667 
3668 	if (IS_ENABLED(CONFIG_PREEMPTION))
3669 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3670 	might_sleep();  /* Check for RCU read-side critical section. */
3671 	preempt_disable();
3672 	ret = num_online_cpus() <= 1;
3673 	preempt_enable();
3674 	return ret;
3675 }
3676 
3677 /**
3678  * synchronize_rcu - wait until a grace period has elapsed.
3679  *
3680  * Control will return to the caller some time after a full grace
3681  * period has elapsed, in other words after all currently executing RCU
3682  * read-side critical sections have completed.  Note, however, that
3683  * upon return from synchronize_rcu(), the caller might well be executing
3684  * concurrently with new RCU read-side critical sections that began while
3685  * synchronize_rcu() was waiting.  RCU read-side critical sections are
3686  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3687  * In addition, regions of code across which interrupts, preemption, or
3688  * softirqs have been disabled also serve as RCU read-side critical
3689  * sections.  This includes hardware interrupt handlers, softirq handlers,
3690  * and NMI handlers.
3691  *
3692  * Note that this guarantee implies further memory-ordering guarantees.
3693  * On systems with more than one CPU, when synchronize_rcu() returns,
3694  * each CPU is guaranteed to have executed a full memory barrier since
3695  * the end of its last RCU read-side critical section whose beginning
3696  * preceded the call to synchronize_rcu().  In addition, each CPU having
3697  * an RCU read-side critical section that extends beyond the return from
3698  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3699  * after the beginning of synchronize_rcu() and before the beginning of
3700  * that RCU read-side critical section.  Note that these guarantees include
3701  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3702  * that are executing in the kernel.
3703  *
3704  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3705  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3706  * to have executed a full memory barrier during the execution of
3707  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3708  * again only if the system has more than one CPU).
3709  */
3710 void synchronize_rcu(void)
3711 {
3712 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3713 			 lock_is_held(&rcu_lock_map) ||
3714 			 lock_is_held(&rcu_sched_lock_map),
3715 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3716 	if (rcu_blocking_is_gp())
3717 		return;
3718 	if (rcu_gp_is_expedited())
3719 		synchronize_rcu_expedited();
3720 	else
3721 		wait_rcu_gp(call_rcu);
3722 }
3723 EXPORT_SYMBOL_GPL(synchronize_rcu);
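/*
 * Minimal updater sketch (illustrative; "struct foo", gp, foo_lock, and
 * update_foo() are hypothetical, the RCU primitives are real):
 *
 *	struct foo __rcu *gp;
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	void update_foo(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = rcu_dereference_protected(gp,
 *						   lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(gp, new_fp);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();	// Wait out pre-existing readers.
 *		kfree(old_fp);		// No reader can still hold old_fp.
 *	}
 */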
3724 
3725 /**
3726  * get_state_synchronize_rcu - Snapshot current RCU state
3727  *
3728  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3729  * to determine whether or not a full grace period has elapsed in the
3730  * meantime.
3731  */
3732 unsigned long get_state_synchronize_rcu(void)
3733 {
3734 	/*
3735 	 * Any prior manipulation of RCU-protected data must happen
3736 	 * before the load from ->gp_seq.
3737 	 */
3738 	smp_mb();  /* ^^^ */
3739 	return rcu_seq_snap(&rcu_state.gp_seq);
3740 }
3741 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3742 
3743 /**
3744  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3745  *
3746  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3747  *
3748  * If a full RCU grace period has elapsed since the earlier call to
3749  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3750  * synchronize_rcu() to wait for a full grace period.
3751  *
3752  * Yes, this function does not take counter wrap into account.  But
3753  * counter wrap is harmless.  If the counter wraps, we have waited for
3754  * more than 2 billion grace periods (and way more on a 64-bit system!),
3755  * so waiting for one additional grace period should be just fine.
3756  */
3757 void cond_synchronize_rcu(unsigned long oldstate)
3758 {
3759 	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
3760 		synchronize_rcu();
3761 	else
3762 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3763 }
3764 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
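/*
 * Sketch of the snapshot/conditional-wait pairing (illustrative only;
 * do_other_work() is a hypothetical stand-in):
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_other_work();		// Grace periods may elapse here.
 *	cond_synchronize_rcu(cookie);	// No-op if a full GP already did.
 */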
3765 
3766 /*
3767  * Check to see if there is any immediate RCU-related work to be done by
3768  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3769  * in order of increasing expense: checks that can be carried out against
3770  * CPU-local state are performed first.  However, we must check for CPU
3771  * stalls first, else we might not get a chance.
3772  */
3773 static int rcu_pending(int user)
3774 {
3775 	bool gp_in_progress;
3776 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3777 	struct rcu_node *rnp = rdp->mynode;
3778 
3779 	lockdep_assert_irqs_disabled();
3780 
3781 	/* Check for CPU stalls, if enabled. */
3782 	check_cpu_stall(rdp);
3783 
3784 	/* Does this CPU need a deferred NOCB wakeup? */
3785 	if (rcu_nocb_need_deferred_wakeup(rdp))
3786 		return 1;
3787 
3788 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3789 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3790 		return 0;
3791 
3792 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3793 	gp_in_progress = rcu_gp_in_progress();
3794 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3795 		return 1;
3796 
3797 	/* Does this CPU have callbacks ready to invoke? */
3798 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
3799 		return 1;
3800 
3801 	/* Has RCU gone idle with this CPU needing another grace period? */
3802 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3803 	    (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
3804 	     !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
3805 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3806 		return 1;
3807 
3808 	/* Have RCU grace period completed or started?  */
3809 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3810 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3811 		return 1;
3812 
3813 	/* nothing to do */
3814 	return 0;
3815 }
3816 
3817 /*
3818  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3819  * the compiler is expected to optimize this away.
3820  */
3821 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3822 {
3823 	trace_rcu_barrier(rcu_state.name, s, cpu,
3824 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3825 }
3826 
3827 /*
3828  * RCU callback function for rcu_barrier().  If we are last, wake
3829  * up the task executing rcu_barrier().
3830  *
3831  * Note that the value of rcu_state.barrier_sequence must be captured
3832  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3833  * other CPUs might count the value down to zero before this CPU gets
3834  * around to invoking rcu_barrier_trace(), which might result in bogus
3835  * data from the next instance of rcu_barrier().
3836  */
3837 static void rcu_barrier_callback(struct rcu_head *rhp)
3838 {
3839 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3840 
3841 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3842 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3843 		complete(&rcu_state.barrier_completion);
3844 	} else {
3845 		rcu_barrier_trace(TPS("CB"), -1, s);
3846 	}
3847 }
3848 
3849 /*
3850  * Called with preemption disabled, and from cross-cpu IRQ context.
3851  */
3852 static void rcu_barrier_func(void *cpu_in)
3853 {
3854 	uintptr_t cpu = (uintptr_t)cpu_in;
3855 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3856 
3857 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3858 	rdp->barrier_head.func = rcu_barrier_callback;
3859 	debug_rcu_head_queue(&rdp->barrier_head);
3860 	rcu_nocb_lock(rdp);
3861 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3862 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3863 		atomic_inc(&rcu_state.barrier_cpu_count);
3864 	} else {
3865 		debug_rcu_head_unqueue(&rdp->barrier_head);
3866 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3867 				  rcu_state.barrier_sequence);
3868 	}
3869 	rcu_nocb_unlock(rdp);
3870 }
3871 
3872 /**
3873  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3874  *
3875  * Note that this primitive does not necessarily wait for an RCU grace period
3876  * to complete.  For example, if there are no RCU callbacks queued anywhere
3877  * in the system, then rcu_barrier() is within its rights to return
3878  * immediately, without waiting for anything, much less an RCU grace period.
3879  */
3880 void rcu_barrier(void)
3881 {
3882 	uintptr_t cpu;
3883 	struct rcu_data *rdp;
3884 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3885 
3886 	rcu_barrier_trace(TPS("Begin"), -1, s);
3887 
3888 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3889 	mutex_lock(&rcu_state.barrier_mutex);
3890 
3891 	/* Did someone else do our work for us? */
3892 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3893 		rcu_barrier_trace(TPS("EarlyExit"), -1,
3894 				  rcu_state.barrier_sequence);
3895 		smp_mb(); /* caller's subsequent code after above check. */
3896 		mutex_unlock(&rcu_state.barrier_mutex);
3897 		return;
3898 	}
3899 
3900 	/* Mark the start of the barrier operation. */
3901 	rcu_seq_start(&rcu_state.barrier_sequence);
3902 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3903 
3904 	/*
3905 	 * Initialize the count to two rather than to zero in order
3906 	 * to avoid a too-soon return to zero in case of an immediate
3907 	 * invocation of the just-enqueued callback (or preemption of
3908 	 * this task).  Exclude CPU-hotplug operations to ensure that no
3909 	 * offline non-offloaded CPU has callbacks queued.
3910 	 */
3911 	init_completion(&rcu_state.barrier_completion);
3912 	atomic_set(&rcu_state.barrier_cpu_count, 2);
3913 	get_online_cpus();
3914 
3915 	/*
3916 	 * Force each CPU with callbacks to register a new callback.
3917 	 * When that callback is invoked, we will know that all of the
3918 	 * corresponding CPU's preceding callbacks have been invoked.
3919 	 */
3920 	for_each_possible_cpu(cpu) {
3921 		rdp = per_cpu_ptr(&rcu_data, cpu);
3922 		if (cpu_is_offline(cpu) &&
3923 		    !rcu_segcblist_is_offloaded(&rdp->cblist))
3924 			continue;
3925 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3926 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
3927 					  rcu_state.barrier_sequence);
3928 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3929 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3930 			   cpu_is_offline(cpu)) {
3931 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3932 					  rcu_state.barrier_sequence);
3933 			local_irq_disable();
3934 			rcu_barrier_func((void *)cpu);
3935 			local_irq_enable();
3936 		} else if (cpu_is_offline(cpu)) {
3937 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3938 					  rcu_state.barrier_sequence);
3939 		} else {
3940 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3941 					  rcu_state.barrier_sequence);
3942 		}
3943 	}
3944 	put_online_cpus();
3945 
3946 	/*
3947 	 * Now that we have an rcu_barrier_callback() callback on each
3948 	 * CPU, and thus each one is counted, remove the initial count.
3949 	 */
3950 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3951 		complete(&rcu_state.barrier_completion);
3952 
3953 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3954 	wait_for_completion(&rcu_state.barrier_completion);
3955 
3956 	/* Mark the end of the barrier operation. */
3957 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3958 	rcu_seq_end(&rcu_state.barrier_sequence);
3959 
3960 	/* Other rcu_barrier() invocations can now safely proceed. */
3961 	mutex_unlock(&rcu_state.barrier_mutex);
3962 }
3963 EXPORT_SYMBOL_GPL(rcu_barrier);
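/*
 * Typical unload-time sketch (illustrative; foo_stop_posting_callbacks(),
 * foo_exit(), and foo_table are hypothetical): first prevent new callbacks
 * from being posted, then wait for the in-flight ones, and only then free
 * the shared state that those callbacks might reference.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();	// No new call_rcu() calls.
 *		rcu_barrier();			// Wait for queued callbacks.
 *		kfree(foo_table);		// Safe only after the barrier.
 *	}
 */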
3964 
3965 /*
3966  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3967  * first CPU in a given leaf rcu_node structure coming online.  The caller
3968  * must hold the corresponding leaf rcu_node ->lock with interrupts
3969  * disabled.
3970  */
3971 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3972 {
3973 	long mask;
3974 	long oldmask;
3975 	struct rcu_node *rnp = rnp_leaf;
3976 
3977 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
3978 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
3979 	for (;;) {
3980 		mask = rnp->grpmask;
3981 		rnp = rnp->parent;
3982 		if (rnp == NULL)
3983 			return;
3984 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3985 		oldmask = rnp->qsmaskinit;
3986 		rnp->qsmaskinit |= mask;
3987 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3988 		if (oldmask)
3989 			return;
3990 	}
3991 }
3992 
3993 /*
3994  * Do boot-time initialization of a CPU's per-CPU RCU data.
3995  */
3996 static void __init
3997 rcu_boot_init_percpu_data(int cpu)
3998 {
3999 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4000 
4001 	/* Set up local state, ensuring consistent view of global state. */
4002 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4003 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4004 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4005 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4006 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4007 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4008 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4009 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4010 	rdp->cpu = cpu;
4011 	rcu_boot_init_nocb_percpu_data(rdp);
4012 }
4013 
4014 /*
4015  * Invoked early in the CPU-online process, when pretty much all services
4016  * are available.  The incoming CPU is not present.
4017  *
4018  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4019  * offline event can be happening at a given time.  Note also that we can
4020  * accept some slop in the rcu_state.gp_seq access due to the fact that this
4021  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4022  * And any offloaded callbacks are being numbered elsewhere.
4023  */
4024 int rcutree_prepare_cpu(unsigned int cpu)
4025 {
4026 	unsigned long flags;
4027 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4028 	struct rcu_node *rnp = rcu_get_root();
4029 
4030 	/* Set up local state, ensuring consistent view of global state. */
4031 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4032 	rdp->qlen_last_fqs_check = 0;
4033 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4034 	rdp->blimit = blimit;
4035 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
4036 	    !rcu_segcblist_is_offloaded(&rdp->cblist))
4037 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4038 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4039 	rcu_dynticks_eqs_online();
4040 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4041 
4042 	/*
4043 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4044 	 * propagation up the rcu_node tree will happen at the beginning
4045 	 * of the next grace period.
4046 	 */
4047 	rnp = rdp->mynode;
4048 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4049 	rdp->beenonline = true;	 /* We have now been online. */
4050 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4051 	rdp->gp_seq_needed = rdp->gp_seq;
4052 	rdp->cpu_no_qs.b.norm = true;
4053 	rdp->core_needs_qs = false;
4054 	rdp->rcu_iw_pending = false;
4055 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4056 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4057 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4058 	rcu_prepare_kthreads(cpu);
4059 	rcu_spawn_cpu_nocb_kthread(cpu);
4060 
4061 	return 0;
4062 }
4063 
4064 /*
4065  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4066  */
4067 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4068 {
4069 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4070 
4071 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4072 }
4073 
4074 /*
4075  * Near the end of the CPU-online process.  Pretty much all services
4076  * enabled, and the CPU is now very much alive.
4077  */
4078 int rcutree_online_cpu(unsigned int cpu)
4079 {
4080 	unsigned long flags;
4081 	struct rcu_data *rdp;
4082 	struct rcu_node *rnp;
4083 
4084 	rdp = per_cpu_ptr(&rcu_data, cpu);
4085 	rnp = rdp->mynode;
4086 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4087 	rnp->ffmask |= rdp->grpmask;
4088 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4089 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4090 		return 0; /* Too early in boot for scheduler work. */
4091 	sync_sched_exp_online_cleanup(cpu);
4092 	rcutree_affinity_setting(cpu, -1);
4093 
4094 	// Stop-machine done, so allow nohz_full to disable tick.
4095 	tick_dep_clear(TICK_DEP_BIT_RCU);
4096 	return 0;
4097 }
4098 
4099 /*
4100  * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4101  * with pretty much all services enabled.
4102  */
4103 int rcutree_offline_cpu(unsigned int cpu)
4104 {
4105 	unsigned long flags;
4106 	struct rcu_data *rdp;
4107 	struct rcu_node *rnp;
4108 
4109 	rdp = per_cpu_ptr(&rcu_data, cpu);
4110 	rnp = rdp->mynode;
4111 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4112 	rnp->ffmask &= ~rdp->grpmask;
4113 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4114 
4115 	rcutree_affinity_setting(cpu, cpu);
4116 
4117 	// nohz_full CPUs need the tick for stop-machine to work quickly
4118 	tick_dep_set(TICK_DEP_BIT_RCU);
4119 	return 0;
4120 }
4121 
4122 /*
4123  * Mark the specified CPU as being online so that subsequent grace periods
4124  * (both expedited and normal) will wait on it.  Note that this means that
4125  * incoming CPUs are not allowed to use RCU read-side critical sections
4126  * until this function is called.  Failing to observe this restriction
4127  * will result in lockdep splats.
4128  *
4129  * Note that this function is special in that it is invoked directly
4130  * from the incoming CPU rather than from the cpuhp_step mechanism.
4131  * This is because this function must be invoked at a precise location.
4132  */
4133 void rcu_cpu_starting(unsigned int cpu)
4134 {
4135 	unsigned long flags;
4136 	unsigned long mask;
4137 	struct rcu_data *rdp;
4138 	struct rcu_node *rnp;
4139 	bool newcpu;
4140 
4141 	rdp = per_cpu_ptr(&rcu_data, cpu);
4142 	if (rdp->cpu_started)
4143 		return;
4144 	rdp->cpu_started = true;
4145 
4146 	rnp = rdp->mynode;
4147 	mask = rdp->grpmask;
4148 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4149 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4150 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4151 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4152 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4153 	newcpu = !(rnp->expmaskinitnext & mask);
4154 	rnp->expmaskinitnext |= mask;
4155 	/* Allow lockless access for expedited grace periods. */
4156 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4157 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4158 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4159 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4160 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4161 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
4162 		rcu_disable_urgency_upon_qs(rdp);
4163 		/* Report QS -after- changing ->qsmaskinitnext! */
4164 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4165 	} else {
4166 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4167 	}
4168 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4169 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4170 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4171 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4172 }
4173 
4174 /*
4175  * The outgoing CPU has no further need of RCU, so remove it from
4176  * the rcu_node tree's ->qsmaskinitnext bit masks.
4177  *
4178  * Note that this function is special in that it is invoked directly
4179  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4180  * This is because this function must be invoked at a precise location.
4181  */
4182 void rcu_report_dead(unsigned int cpu)
4183 {
4184 	unsigned long flags;
4185 	unsigned long mask;
4186 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4187 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4188 
4189 	/* QS for any half-done expedited grace period. */
4190 	preempt_disable();
4191 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4192 	preempt_enable();
4193 	rcu_preempt_deferred_qs(current);
4194 
4195 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4196 	mask = rdp->grpmask;
4197 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4198 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4199 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4200 	raw_spin_lock(&rcu_state.ofl_lock);
4201 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4202 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4203 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4204 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4205 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4206 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4207 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4208 	}
4209 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4210 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4211 	raw_spin_unlock(&rcu_state.ofl_lock);
4212 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4213 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4214 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4215 
4216 	rdp->cpu_started = false;
4217 }
4218 
4219 #ifdef CONFIG_HOTPLUG_CPU
4220 /*
4221  * The outgoing CPU has just passed through the dying-idle state, and we
4222  * are being invoked from the CPU that was IPIed to continue the offline
4223  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4224  */
4225 void rcutree_migrate_callbacks(int cpu)
4226 {
4227 	unsigned long flags;
4228 	struct rcu_data *my_rdp;
4229 	struct rcu_node *my_rnp;
4230 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4231 	bool needwake;
4232 
4233 	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4234 	    rcu_segcblist_empty(&rdp->cblist))
4235 		return;  /* No callbacks to migrate. */
4236 
4237 	local_irq_save(flags);
4238 	my_rdp = this_cpu_ptr(&rcu_data);
4239 	my_rnp = my_rdp->mynode;
4240 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4241 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4242 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4243 	/* Leverage recent GPs and set GP for new callbacks. */
4244 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4245 		   rcu_advance_cbs(my_rnp, my_rdp);
4246 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4247 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4248 	rcu_segcblist_disable(&rdp->cblist);
4249 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4250 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4251 	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4252 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4253 		__call_rcu_nocb_wake(my_rdp, true, flags);
4254 	} else {
4255 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4256 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4257 	}
4258 	if (needwake)
4259 		rcu_gp_kthread_wake();
4260 	lockdep_assert_irqs_enabled();
4261 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4262 		  !rcu_segcblist_empty(&rdp->cblist),
4263 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4264 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4265 		  rcu_segcblist_first_cb(&rdp->cblist));
4266 }
4267 #endif
4268 
4269 /*
4270  * On non-huge systems, use expedited RCU grace periods to make suspend
4271  * and hibernation run faster.
4272  */
4273 static int rcu_pm_notify(struct notifier_block *self,
4274 			 unsigned long action, void *hcpu)
4275 {
4276 	switch (action) {
4277 	case PM_HIBERNATION_PREPARE:
4278 	case PM_SUSPEND_PREPARE:
4279 		rcu_expedite_gp();
4280 		break;
4281 	case PM_POST_HIBERNATION:
4282 	case PM_POST_SUSPEND:
4283 		rcu_unexpedite_gp();
4284 		break;
4285 	default:
4286 		break;
4287 	}
4288 	return NOTIFY_OK;
4289 }
4290 
4291 /*
4292  * Spawn the kthreads that handle RCU's grace periods.
4293  */
4294 static int __init rcu_spawn_gp_kthread(void)
4295 {
4296 	unsigned long flags;
4297 	int kthread_prio_in = kthread_prio;
4298 	struct rcu_node *rnp;
4299 	struct sched_param sp;
4300 	struct task_struct *t;
4301 
4302 	/* Force priority into range. */
4303 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4304 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4305 		kthread_prio = 2;
4306 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4307 		kthread_prio = 1;
4308 	else if (kthread_prio < 0)
4309 		kthread_prio = 0;
4310 	else if (kthread_prio > 99)
4311 		kthread_prio = 99;
4312 
4313 	if (kthread_prio != kthread_prio_in)
4314 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4315 			 kthread_prio, kthread_prio_in);
4316 
4317 	rcu_scheduler_fully_active = 1;
4318 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4319 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4320 		return 0;
4321 	if (kthread_prio) {
4322 		sp.sched_priority = kthread_prio;
4323 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4324 	}
4325 	rnp = rcu_get_root();
4326 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4327 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4328 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4329 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4330 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4331 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4332 	wake_up_process(t);
4333 	rcu_spawn_nocb_kthreads();
4334 	rcu_spawn_boost_kthreads();
4335 	rcu_spawn_core_kthreads();
4336 	return 0;
4337 }
4338 early_initcall(rcu_spawn_gp_kthread);
4339 
4340 /*
4341  * This function is invoked towards the end of the scheduler's
4342  * initialization process.  Before this is called, the idle task might
4343  * contain synchronous grace-period primitives (during which time, this idle
4344  * task is booting the system, and such primitives are no-ops).  After this
4345  * function is called, any synchronous grace-period primitives are run as
4346  * expedited, with the requesting task driving the grace period forward.
4347  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4348  * runtime RCU functionality.
4349  */
4350 void rcu_scheduler_starting(void)
4351 {
4352 	WARN_ON(num_online_cpus() != 1);
4353 	WARN_ON(nr_context_switches() > 0);
4354 	rcu_test_sync_prims();
4355 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4356 	rcu_test_sync_prims();
4357 }
4358 
4359 /*
4360  * Helper function for rcu_init() that initializes the rcu_state structure.
4361  */
4362 static void __init rcu_init_one(void)
4363 {
4364 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4365 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4366 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4367 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4368 
4369 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4370 	int cpustride = 1;
4371 	int i;
4372 	int j;
4373 	struct rcu_node *rnp;
4374 
4375 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4376 
4377 	/* Silence gcc 4.8 false positive about array index out of range. */
4378 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4379 		panic("rcu_init_one: rcu_num_lvls out of range");
4380 
4381 	/* Initialize the level-tracking arrays. */
4382 
4383 	for (i = 1; i < rcu_num_lvls; i++)
4384 		rcu_state.level[i] =
4385 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4386 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4387 
4388 	/* Initialize the elements themselves, starting from the leaves. */
4389 
4390 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4391 		cpustride *= levelspread[i];
4392 		rnp = rcu_state.level[i];
4393 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4394 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4395 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4396 						   &rcu_node_class[i], buf[i]);
4397 			raw_spin_lock_init(&rnp->fqslock);
4398 			lockdep_set_class_and_name(&rnp->fqslock,
4399 						   &rcu_fqs_class[i], fqs[i]);
4400 			rnp->gp_seq = rcu_state.gp_seq;
4401 			rnp->gp_seq_needed = rcu_state.gp_seq;
4402 			rnp->completedqs = rcu_state.gp_seq;
4403 			rnp->qsmask = 0;
4404 			rnp->qsmaskinit = 0;
4405 			rnp->grplo = j * cpustride;
4406 			rnp->grphi = (j + 1) * cpustride - 1;
4407 			if (rnp->grphi >= nr_cpu_ids)
4408 				rnp->grphi = nr_cpu_ids - 1;
4409 			if (i == 0) {
4410 				rnp->grpnum = 0;
4411 				rnp->grpmask = 0;
4412 				rnp->parent = NULL;
4413 			} else {
4414 				rnp->grpnum = j % levelspread[i - 1];
4415 				rnp->grpmask = BIT(rnp->grpnum);
4416 				rnp->parent = rcu_state.level[i - 1] +
4417 					      j / levelspread[i - 1];
4418 			}
4419 			rnp->level = i;
4420 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4421 			rcu_init_one_nocb(rnp);
4422 			init_waitqueue_head(&rnp->exp_wq[0]);
4423 			init_waitqueue_head(&rnp->exp_wq[1]);
4424 			init_waitqueue_head(&rnp->exp_wq[2]);
4425 			init_waitqueue_head(&rnp->exp_wq[3]);
4426 			spin_lock_init(&rnp->exp_lock);
4427 		}
4428 	}
4429 
4430 	init_swait_queue_head(&rcu_state.gp_wq);
4431 	init_swait_queue_head(&rcu_state.expedited_wq);
4432 	rnp = rcu_first_leaf_node();
4433 	for_each_possible_cpu(i) {
4434 		while (i > rnp->grphi)
4435 			rnp++;
4436 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4437 		rcu_boot_init_percpu_data(i);
4438 	}
4439 }
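
/*
 * Illustrative sketch, not part of the kernel source: on a 64-bit
 * build with the default RCU_FANOUT_LEAF of 16 and a hypothetical
 * nr_cpu_ids of 96, the loops above produce six leaf rcu_node
 * structures, each covering a stride of 16 CPUs:
 *
 *	leaf 0: ->grplo =  0, ->grphi = 15
 *	leaf 1: ->grplo = 16, ->grphi = 31
 *	...
 *	leaf 5: ->grplo = 80, ->grphi = 95
 *
 * The final for_each_possible_cpu() loop then points each CPU's
 * rcu_data ->mynode at the leaf whose [->grplo, ->grphi] range
 * contains that CPU, so CPU 17's ->mynode is leaf 1.
 */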

/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
void rcu_init_geometry(void)
{
	ulong d;
	int i;
	static unsigned long old_nr_cpu_ids;
	int rcu_capacity[RCU_NUM_LVLS];
	static bool initialized;

	if (initialized) {
		/*
		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
		 */
		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
		return;
	}

	old_nr_cpu_ids = nr_cpu_ids;
	initialized = true;

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, plus one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute the number of CPUs that can be handled by an rcu_node
	 * tree with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
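
/*
 * Illustrative sketch, not part of the kernel source: assuming
 * rcu_fanout_leaf = 16, RCU_FANOUT = 64, and a hypothetical
 * nr_cpu_ids of 96 (differing from NR_CPUS, so the adjustment
 * above actually runs):
 *
 *	rcu_capacity[0] =   16	(one level covers 16 CPUs)
 *	rcu_capacity[1] = 1024	(two levels cover 1024 CPUs)
 *
 * Because 96 > 16 but 96 <= 1024, rcu_num_lvls becomes 2 and
 * num_rcu_lvl[] = { 1, 6 }: a single root rcu_node with six leaf
 * children, for rcu_num_nodes = 7 in all.
 */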

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}
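
/*
 * Illustrative sketch, not part of the kernel source: for the
 * hypothetical two-level, 96-CPU geometry sketched above, the dump
 * would look roughly like this, one line per tree level:
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:95 ^0
 *	rcu:  0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4  80:95 ^5
 */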
4556 
4557 struct workqueue_struct *rcu_gp_wq;
4558 struct workqueue_struct *rcu_par_gp_wq;
4559 
static void __init kfree_rcu_batch_init(void)
{
	int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		for (i = 0; i < KFREE_N_BATCHES; i++) {
			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
			krcp->krw_arr[i].krcp = krcp;
		}

		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
		INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
		krcp->initialized = true;
	}
	if (register_shrinker(&kfree_rcu_shrinker))
		pr_err("Failed to register kfree_rcu() shrinker!\n");
}
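
/*
 * Illustrative sketch, not part of the kernel source: the per-CPU
 * batching state initialized above backs the kfree_rcu() API.  A
 * typical caller looks like this (struct foo and its rcu_head field
 * "rh" are hypothetical names):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rh;
 *	};
 *
 *	static void release_foo(struct foo *p)
 *	{
 *		kfree_rcu(p, rh);  // freed only after a grace period
 *	}
 *
 * Each such call is queued on the invoking CPU's kfree_rcu_cpu
 * structure and later freed in batches by kfree_rcu_work().
 */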

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	kfree_rcu_batch_init();
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts or
	 * the scheduler is operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}

	/* Create workqueues for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_par_gp_wq);
	srcu_init();

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;
}
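
/*
 * Illustrative note, not part of the kernel source: with the default
 * rcutree.qhimark of 10000 callbacks and DEFAULT_RCU_QOVLD_MULT of 2,
 * leaving rcutree.qovld at its default of -1 yields qovld_calc =
 * 20000, the per-CPU callback count beyond which RCU considers a CPU
 * to be overloaded with callbacks.
 */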

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"