1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/moduleparam.h>
35 #include <linux/percpu.h>
36 #include <linux/notifier.h>
37 #include <linux/cpu.h>
38 #include <linux/mutex.h>
39 #include <linux/time.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/wait.h>
42 #include <linux/kthread.h>
43 #include <uapi/linux/sched/types.h>
44 #include <linux/prefetch.h>
45 #include <linux/delay.h>
46 #include <linux/random.h>
47 #include <linux/trace_events.h>
48 #include <linux/suspend.h>
49 #include <linux/ftrace.h>
50 #include <linux/tick.h>
51 #include <linux/sysrq.h>
52 #include <linux/kprobes.h>
53 #include <linux/gfp.h>
54 #include <linux/oom.h>
55 #include <linux/smpboot.h>
56 #include <linux/jiffies.h>
57 #include <linux/slab.h>
58 #include <linux/sched/isolation.h>
59 #include <linux/sched/clock.h>
60 #include <linux/vmalloc.h>
61 #include <linux/mm.h>
62 #include <linux/kasan.h>
63 #include "../time/tick-internal.h"
64 
65 #include "tree.h"
66 #include "rcu.h"
67 
68 #ifdef MODULE_PARAM_PREFIX
69 #undef MODULE_PARAM_PREFIX
70 #endif
71 #define MODULE_PARAM_PREFIX "rcutree."
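/*
 * Illustrative example (not taken from this file): with the "rcutree."
 * prefix defined above, the boot-time parameters declared below are set
 * on the kernel command line as, e.g.:
 *
 *	rcutree.dump_tree=1 rcutree.kthread_prio=2
 */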
72 
73 /* Data structures. */
74 
75 /*
76  * Steal a bit from the bottom of ->dynticks for idle entry/exit
77  * control.  Initially this is for TLB flushing.
78  */
79 #define RCU_DYNTICK_CTRL_MASK 0x1
80 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
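/*
 * Conceptual view (an informal sketch inferred from the accessors below,
 * not a definitive statement of the layout):
 *
 *	->dynticks == (count << 1) | special
 *
 * Bit 0 (RCU_DYNTICK_CTRL_MASK) requests special action (for example, a
 * TLB flush) on the next EQS exit, while each EQS entry or exit adds
 * RCU_DYNTICK_CTRL_CTR, so the RCU_DYNTICK_CTRL_CTR bit is set exactly
 * when RCU is watching this CPU.
 */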
81 
82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
83 	.dynticks_nesting = 1,
84 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
85 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
86 };
87 static struct rcu_state rcu_state = {
88 	.level = { &rcu_state.node[0] },
89 	.gp_state = RCU_GP_IDLE,
90 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
91 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
92 	.name = RCU_NAME,
93 	.abbr = RCU_ABBR,
94 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
95 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
96 	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
97 };
98 
99 /* Dump rcu_node combining tree at boot to verify correct setup. */
100 static bool dump_tree;
101 module_param(dump_tree, bool, 0444);
102 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
103 static bool use_softirq = true;
104 module_param(use_softirq, bool, 0444);
105 /* Control rcu_node-tree auto-balancing at boot time. */
106 static bool rcu_fanout_exact;
107 module_param(rcu_fanout_exact, bool, 0444);
108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 module_param(rcu_fanout_leaf, int, 0444);
111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 /* Number of rcu_nodes at specified level. */
113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
115 
116 /*
117  * The rcu_scheduler_active variable is initialized to the value
118  * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
119  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
120  * RCU can assume that there is but one task, allowing RCU to (for example)
121  * optimize synchronize_rcu() to a simple barrier().  When this variable
122  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123  * to detect real grace periods.  This variable is also used to suppress
124  * boot-time false positives from lockdep-RCU error checking.  Finally, it
125  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126  * is fully initialized, including all of its kthreads having been spawned.
127  */
128 int rcu_scheduler_active __read_mostly;
129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
130 
131 /*
132  * The rcu_scheduler_fully_active variable transitions from zero to one
133  * during the early_initcall() processing, which is after the scheduler
134  * is capable of creating new tasks.  So RCU processing (for example,
135  * creating tasks for RCU priority boosting) must be delayed until after
136  * rcu_scheduler_fully_active transitions from zero to one.  We also
137  * currently delay invocation of any RCU callbacks until after this point.
138  *
139  * It might later prove better for people registering RCU callbacks during
140  * early boot to take responsibility for these callbacks, but one step at
141  * a time.
142  */
143 static int rcu_scheduler_fully_active __read_mostly;
144 
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 			      unsigned long gps, unsigned long flags);
147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150 static void invoke_rcu_core(void);
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
152 static void sync_sched_exp_online_cleanup(int cpu);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 
155 /* rcuc/rcub kthread realtime priority */
156 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
157 module_param(kthread_prio, int, 0444);
158 
159 /* Delay in jiffies for grace-period initialization delays, debug only. */
160 
161 static int gp_preinit_delay;
162 module_param(gp_preinit_delay, int, 0444);
163 static int gp_init_delay;
164 module_param(gp_init_delay, int, 0444);
165 static int gp_cleanup_delay;
166 module_param(gp_cleanup_delay, int, 0444);
167 
168 // Add delay to rcu_read_unlock() for strict grace periods.
169 static int rcu_unlock_delay;
170 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
171 module_param(rcu_unlock_delay, int, 0444);
172 #endif
173 
174 /*
175  * This rcu parameter is runtime-read-only. It reflects
176  * a minimum allowed number of objects which can be cached
177  * per-CPU. Object size is equal to one page. This value
178  * can be changed at boot time.
179  */
180 static int rcu_min_cached_objs = 5;
181 module_param(rcu_min_cached_objs, int, 0444);
182 
183 /* Retrieve RCU kthreads priority for rcutorture */
184 int rcu_get_gp_kthreads_prio(void)
185 {
186 	return kthread_prio;
187 }
188 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
189 
190 /*
191  * Number of grace periods between delays, normalized by the duration of
192  * the delay.  The longer the delay, the more the grace periods between
193  * each delay.  The reason for this normalization is that it means that,
194  * for non-zero delays, the overall slowdown of grace periods is constant
195  * regardless of the duration of the delay.  This arrangement balances
196  * the need for long delays to increase some race probabilities with the
197  * need for fast grace periods to increase other race probabilities.
198  */
199 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
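/*
 * Worked example (illustrative, based on the normalization described
 * above): with a debug delay of d jiffies, a delay is inserted roughly
 * once per PER_RCU_NODE_PERIOD * d grace periods, so the average added
 * latency stays near 1/PER_RCU_NODE_PERIOD jiffy per grace period no
 * matter which d is chosen.
 */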
200 
201 /*
202  * Compute the mask of online CPUs for the specified rcu_node structure.
203  * This will not be stable unless the rcu_node structure's ->lock is
204  * held, but the bit corresponding to the current CPU will be stable
205  * in most contexts.
206  */
207 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
208 {
209 	return READ_ONCE(rnp->qsmaskinitnext);
210 }
211 
212 /*
213  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
214  * permit this function to be invoked without holding the root rcu_node
215  * structure's ->lock, but of course results can be subject to change.
216  */
217 static int rcu_gp_in_progress(void)
218 {
219 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
220 }
221 
222 /*
223  * Return the number of callbacks queued on the specified CPU.
224  * Handles both the nocbs and normal cases.
225  */
226 static long rcu_get_n_cbs_cpu(int cpu)
227 {
228 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
229 
230 	if (rcu_segcblist_is_enabled(&rdp->cblist))
231 		return rcu_segcblist_n_cbs(&rdp->cblist);
232 	return 0;
233 }
234 
235 void rcu_softirq_qs(void)
236 {
237 	rcu_qs();
238 	rcu_preempt_deferred_qs(current);
239 }
240 
241 /*
242  * Record entry into an extended quiescent state.  This is only to be
243  * called when not already in an extended quiescent state, that is,
244  * RCU is watching prior to the call to this function and is no longer
245  * watching upon return.
246  */
247 static noinstr void rcu_dynticks_eqs_enter(void)
248 {
249 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
250 	int seq;
251 
252 	/*
253 	 * CPUs seeing atomic_add_return() must see prior RCU read-side
254 	 * critical sections, and we also must force ordering with the
255 	 * next idle sojourn.
256 	 */
257 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
258 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
259 	// RCU is no longer watching.  Better be in extended quiescent state!
260 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
261 		     (seq & RCU_DYNTICK_CTRL_CTR));
262 	/* Better not have special action (TLB flush) pending! */
263 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
264 		     (seq & RCU_DYNTICK_CTRL_MASK));
265 }
266 
267 /*
268  * Record exit from an extended quiescent state.  This is only to be
269  * called from an extended quiescent state, that is, RCU is not watching
270  * prior to the call to this function and is watching upon return.
271  */
272 static noinstr void rcu_dynticks_eqs_exit(void)
273 {
274 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
275 	int seq;
276 
277 	/*
278 	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
279 	 * and we also must force ordering with the next RCU read-side
280 	 * critical section.
281 	 */
282 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
283 	// RCU is now watching.  Better not be in an extended quiescent state!
284 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
285 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
286 		     !(seq & RCU_DYNTICK_CTRL_CTR));
287 	if (seq & RCU_DYNTICK_CTRL_MASK) {
288 		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
289 		smp_mb__after_atomic(); /* _exit after clearing mask. */
290 	}
291 }
292 
293 /*
294  * Reset the current CPU's ->dynticks counter to indicate that the
295  * newly onlined CPU is no longer in an extended quiescent state.
296  * This will either leave the counter unchanged, or increment it
297  * to the next non-quiescent value.
298  *
299  * The non-atomic test/increment sequence works because the upper bits
300  * of the ->dynticks counter are manipulated only by the corresponding CPU,
301  * or when the corresponding CPU is offline.
302  */
303 static void rcu_dynticks_eqs_online(void)
304 {
305 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
306 
307 	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
308 		return;
309 	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
310 }
311 
312 /*
313  * Is the current CPU in an extended quiescent state?
314  *
315  * No ordering, as we are sampling CPU-local information.
316  */
317 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
318 {
319 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
320 
321 	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
322 }
323 
324 /*
325  * Snapshot the ->dynticks counter with full ordering so as to allow
326  * stable comparison of this counter with past and future snapshots.
327  */
328 static int rcu_dynticks_snap(struct rcu_data *rdp)
329 {
330 	int snap = atomic_add_return(0, &rdp->dynticks);
331 
332 	return snap & ~RCU_DYNTICK_CTRL_MASK;
333 }
334 
335 /*
336  * Return true if the snapshot returned from rcu_dynticks_snap()
337  * indicates that RCU is in an extended quiescent state.
338  */
339 static bool rcu_dynticks_in_eqs(int snap)
340 {
341 	return !(snap & RCU_DYNTICK_CTRL_CTR);
342 }
343 
344 /*
345  * Return true if the CPU corresponding to the specified rcu_data
346  * structure has spent some time in an extended quiescent state since
347  * rcu_dynticks_snap() returned the specified snapshot.
348  */
349 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
350 {
351 	return snap != rcu_dynticks_snap(rdp);
352 }
353 
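/*
 * Typical usage of the helpers above, as seen later in this file:
 * dyntick_save_progress_counter() records rcu_dynticks_snap() for a CPU
 * that has not yet reported a quiescent state, and a later
 * force-quiescent-state scan calls rcu_dynticks_in_eqs_since() to credit
 * that CPU with a quiescent state if its ->dynticks value has changed.
 */
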
354 /*
355  * Return true if the referenced integer is zero while the specified
356  * CPU remains within a single extended quiescent state.
357  */
358 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
359 {
360 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
361 	int snap;
362 
363 	// If not quiescent, force back to earlier extended quiescent state.
364 	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
365 					       RCU_DYNTICK_CTRL_CTR);
366 
367 	smp_rmb(); // Order ->dynticks and *vp reads.
368 	if (READ_ONCE(*vp))
369 		return false;  // Non-zero, so report failure;
370 	smp_rmb(); // Order *vp read and ->dynticks re-read.
371 
372 	// If still in the same extended quiescent state, we are good!
373 	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
374 }
375 
376 /*
377  * Set the special (bottom) bit of the specified CPU so that it
378  * will take special action (such as flushing its TLB) on the
379  * next exit from an extended quiescent state.  Returns true if
380  * the bit was successfully set, or false if the CPU was not in
381  * an extended quiescent state.
382  */
383 bool rcu_eqs_special_set(int cpu)
384 {
385 	int old;
386 	int new;
387 	int new_old;
388 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
389 
390 	new_old = atomic_read(&rdp->dynticks);
391 	do {
392 		old = new_old;
393 		if (old & RCU_DYNTICK_CTRL_CTR)
394 			return false;
395 		new = old | RCU_DYNTICK_CTRL_MASK;
396 		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
397 	} while (new_old != old);
398 	return true;
399 }
400 
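/*
 * Hypothetical usage sketch (not from this file): a caller wanting a
 * remote CPU to flush its TLB might first try to piggyback on that CPU's
 * next EQS exit and fall back to an IPI only if the CPU is not in an
 * extended quiescent state:
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		smp_call_function_single(cpu, do_flush_fn, NULL, 1);
 *
 * where do_flush_fn() is a made-up handler used only for illustration.
 */
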
401 /*
402  * Let the RCU core know that this CPU has gone through the scheduler,
403  * which is a quiescent state.  This is called when the need for a
404  * quiescent state is urgent, so we burn an atomic operation and full
405  * memory barriers to let the RCU core know about it, regardless of what
406  * this CPU might (or might not) do in the near future.
407  *
408  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
409  *
410  * The caller must have disabled interrupts and must not be idle.
411  */
412 notrace void rcu_momentary_dyntick_idle(void)
413 {
414 	int special;
415 
416 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
417 	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
418 				    &this_cpu_ptr(&rcu_data)->dynticks);
419 	/* It is illegal to call this from idle state. */
420 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
421 	rcu_preempt_deferred_qs(current);
422 }
423 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
424 
425 /**
426  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
427  *
428  * If the current CPU is idle and running at a first-level (not nested)
429  * interrupt, or directly, from idle, return true.
430  *
431  * The caller must have at least disabled IRQs.
432  */
433 static int rcu_is_cpu_rrupt_from_idle(void)
434 {
435 	long nesting;
436 
437 	/*
438 	 * Usually called from the tick; but also used from smp_call_function()
439 	 * for expedited grace periods. This latter can result in running from
440 	 * the idle task, instead of an actual IPI.
441 	 */
442 	lockdep_assert_irqs_disabled();
443 
444 	/* Check for counter underflows */
445 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
446 			 "RCU dynticks_nesting counter underflow!");
447 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
448 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
449 
450 	/* Are we at first interrupt nesting level? */
451 	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
452 	if (nesting > 1)
453 		return false;
454 
455 	/*
456 	 * If we're not in an interrupt, we must be in the idle task!
457 	 */
458 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
459 
460 	/* Does CPU appear to be idle from an RCU standpoint? */
461 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
462 }
463 
464 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
465 				// Maximum callbacks per rcu_do_batch ...
466 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
467 static long blimit = DEFAULT_RCU_BLIMIT;
468 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
469 static long qhimark = DEFAULT_RCU_QHIMARK;
470 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
471 static long qlowmark = DEFAULT_RCU_QLOMARK;
472 #define DEFAULT_RCU_QOVLD_MULT 2
473 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
474 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
475 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
476 
477 module_param(blimit, long, 0444);
478 module_param(qhimark, long, 0444);
479 module_param(qlowmark, long, 0444);
480 module_param(qovld, long, 0444);
481 
482 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
483 static ulong jiffies_till_next_fqs = ULONG_MAX;
484 static bool rcu_kick_kthreads;
485 static int rcu_divisor = 7;
486 module_param(rcu_divisor, int, 0644);
487 
488 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
489 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
490 module_param(rcu_resched_ns, long, 0644);
491 
492 /*
493  * How long the grace period must be before we start recruiting
494  * quiescent-state help from rcu_note_context_switch().
495  */
496 static ulong jiffies_till_sched_qs = ULONG_MAX;
497 module_param(jiffies_till_sched_qs, ulong, 0444);
498 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
499 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
500 
501 /*
502  * Make sure that we give the grace-period kthread time to detect any
503  * idle CPUs before taking active measures to force quiescent states.
504  * However, don't go below 100 milliseconds, adjusted upwards for really
505  * large systems.
506  */
507 static void adjust_jiffies_till_sched_qs(void)
508 {
509 	unsigned long j;
510 
511 	/* If jiffies_till_sched_qs was specified, respect the request. */
512 	if (jiffies_till_sched_qs != ULONG_MAX) {
513 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
514 		return;
515 	}
516 	/* Otherwise, set to third fqs scan, but bound below on large system. */
517 	j = READ_ONCE(jiffies_till_first_fqs) +
518 		      2 * READ_ONCE(jiffies_till_next_fqs);
519 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
520 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
521 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
522 	WRITE_ONCE(jiffies_to_sched_qs, j);
523 }
524 
525 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
526 {
527 	ulong j;
528 	int ret = kstrtoul(val, 0, &j);
529 
530 	if (!ret) {
531 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
532 		adjust_jiffies_till_sched_qs();
533 	}
534 	return ret;
535 }
536 
537 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
538 {
539 	ulong j;
540 	int ret = kstrtoul(val, 0, &j);
541 
542 	if (!ret) {
543 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
544 		adjust_jiffies_till_sched_qs();
545 	}
546 	return ret;
547 }
548 
549 static struct kernel_param_ops first_fqs_jiffies_ops = {
550 	.set = param_set_first_fqs_jiffies,
551 	.get = param_get_ulong,
552 };
553 
554 static struct kernel_param_ops next_fqs_jiffies_ops = {
555 	.set = param_set_next_fqs_jiffies,
556 	.get = param_get_ulong,
557 };
558 
559 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
560 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
561 module_param(rcu_kick_kthreads, bool, 0644);
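/*
 * Example (illustrative): because the fqs parameters above are registered
 * with mode 0644, they may also be updated at runtime, e.g. by writing to
 * /sys/module/rcutree/parameters/jiffies_till_first_fqs; the custom ops
 * then clamp the value to at most HZ and recompute jiffies_to_sched_qs.
 */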
562 
563 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
564 static int rcu_pending(int user);
565 
566 /*
567  * Return the number of RCU GPs completed thus far for debug & stats.
568  */
569 unsigned long rcu_get_gp_seq(void)
570 {
571 	return READ_ONCE(rcu_state.gp_seq);
572 }
573 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
574 
575 /*
576  * Return the number of RCU expedited batches completed thus far for
577  * debug & stats.  Odd numbers mean that a batch is in progress, even
578  * numbers mean idle.  The value returned will thus be roughly double
579  * the cumulative batches since boot.
580  */
581 unsigned long rcu_exp_batches_completed(void)
582 {
583 	return rcu_state.expedited_sequence;
584 }
585 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
586 
587 /*
588  * Return the root node of the rcu_state structure.
589  */
590 static struct rcu_node *rcu_get_root(void)
591 {
592 	return &rcu_state.node[0];
593 }
594 
595 /*
596  * Send along grace-period-related data for rcutorture diagnostics.
597  */
598 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
599 			    unsigned long *gp_seq)
600 {
601 	switch (test_type) {
602 	case RCU_FLAVOR:
603 		*flags = READ_ONCE(rcu_state.gp_flags);
604 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
605 		break;
606 	default:
607 		break;
608 	}
609 }
610 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
611 
612 /*
613  * Enter an RCU extended quiescent state, which can be either the
614  * idle loop or adaptive-tickless usermode execution.
615  *
616  * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
617  * the possibility of usermode upcalls having messed up our count
618  * of interrupt nesting level during the prior busy period.
619  */
620 static noinstr void rcu_eqs_enter(bool user)
621 {
622 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
623 
624 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
625 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
626 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
627 		     rdp->dynticks_nesting == 0);
628 	if (rdp->dynticks_nesting != 1) {
629 		// RCU will still be watching, so just do accounting and leave.
630 		rdp->dynticks_nesting--;
631 		return;
632 	}
633 
634 	lockdep_assert_irqs_disabled();
635 	instrumentation_begin();
636 	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
637 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
638 	rdp = this_cpu_ptr(&rcu_data);
639 	rcu_prepare_for_idle();
640 	rcu_preempt_deferred_qs(current);
641 
642 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
643 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
644 
645 	instrumentation_end();
646 	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
647 	// RCU is watching here ...
648 	rcu_dynticks_eqs_enter();
649 	// ... but is no longer watching here.
650 	rcu_dynticks_task_enter();
651 }
652 
653 /**
654  * rcu_idle_enter - inform RCU that current CPU is entering idle
655  *
656  * Enter idle mode, in other words, -leave- the mode in which RCU
657  * read-side critical sections can occur.  (Though RCU read-side
658  * critical sections can occur in irq handlers in idle, a possibility
659  * handled by irq_enter() and irq_exit().)
660  *
661  * If you add or remove a call to rcu_idle_enter(), be sure to test with
662  * CONFIG_RCU_EQS_DEBUG=y.
663  */
664 void rcu_idle_enter(void)
665 {
666 	lockdep_assert_irqs_disabled();
667 	rcu_eqs_enter(false);
668 }
669 EXPORT_SYMBOL_GPL(rcu_idle_enter);
670 
671 #ifdef CONFIG_NO_HZ_FULL
672 /**
673  * rcu_user_enter - inform RCU that we are resuming userspace.
674  *
675  * Enter RCU idle mode right before resuming userspace.  No use of RCU
676  * is permitted between this call and rcu_user_exit(). This way the
677  * CPU doesn't need to maintain the tick for RCU maintenance purposes
678  * when the CPU runs in userspace.
679  *
680  * If you add or remove a call to rcu_user_enter(), be sure to test with
681  * CONFIG_RCU_EQS_DEBUG=y.
682  */
683 noinstr void rcu_user_enter(void)
684 {
685 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
686 
687 	lockdep_assert_irqs_disabled();
688 
689 	instrumentation_begin();
690 	do_nocb_deferred_wakeup(rdp);
691 	instrumentation_end();
692 
693 	rcu_eqs_enter(true);
694 }
695 #endif /* CONFIG_NO_HZ_FULL */
696 
697 /**
698  * rcu_nmi_exit - inform RCU of exit from NMI context
699  *
700  * If we are returning from the outermost NMI handler that interrupted an
701  * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
702  * to let the RCU grace-period handling know that the CPU is back to
703  * being RCU-idle.
704  *
705  * If you add or remove a call to rcu_nmi_exit(), be sure to test
706  * with CONFIG_RCU_EQS_DEBUG=y.
707  */
708 noinstr void rcu_nmi_exit(void)
709 {
710 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
711 
712 	instrumentation_begin();
713 	/*
714 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
715 	 * (We are exiting an NMI handler, so RCU better be paying attention
716 	 * to us!)
717 	 */
718 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
719 	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
720 
721 	/*
722 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
723 	 * leave it in non-RCU-idle state.
724 	 */
725 	if (rdp->dynticks_nmi_nesting != 1) {
726 		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
727 				  atomic_read(&rdp->dynticks));
728 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
729 			   rdp->dynticks_nmi_nesting - 2);
730 		instrumentation_end();
731 		return;
732 	}
733 
734 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
735 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
736 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
737 
738 	if (!in_nmi())
739 		rcu_prepare_for_idle();
740 
741 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
742 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
743 	instrumentation_end();
744 
745 	// RCU is watching here ...
746 	rcu_dynticks_eqs_enter();
747 	// ... but is no longer watching here.
748 
749 	if (!in_nmi())
750 		rcu_dynticks_task_enter();
751 }
752 
753 /**
754  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
755  *
756  * Exit from an interrupt handler, which might possibly result in entering
757  * idle mode, in other words, leaving the mode in which read-side critical
758  * sections can occur.  The caller must have disabled interrupts.
759  *
760  * This code assumes that the idle loop never does anything that might
761  * result in unbalanced calls to irq_enter() and irq_exit().  If your
762  * architecture's idle loop violates this assumption, RCU will give you what
763  * you deserve, good and hard.  But very infrequently and irreproducibly.
764  *
765  * Use things like work queues to work around this limitation.
766  *
767  * You have been warned.
768  *
769  * If you add or remove a call to rcu_irq_exit(), be sure to test with
770  * CONFIG_RCU_EQS_DEBUG=y.
771  */
772 void noinstr rcu_irq_exit(void)
773 {
774 	lockdep_assert_irqs_disabled();
775 	rcu_nmi_exit();
776 }
777 
778 /**
779  * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
780  *			  towards in-kernel preemption
781  *
782  * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
783  * from RCU point of view. Invoked from return from interrupt before kernel
784  * preemption.
785  */
786 void rcu_irq_exit_preempt(void)
787 {
788 	lockdep_assert_irqs_disabled();
789 	rcu_nmi_exit();
790 
791 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
792 			 "RCU dynticks_nesting counter underflow/zero!");
793 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
794 			 DYNTICK_IRQ_NONIDLE,
795 			 "Bad RCU  dynticks_nmi_nesting counter\n");
796 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
797 			 "RCU in extended quiescent state!");
798 }
799 
800 #ifdef CONFIG_PROVE_RCU
801 /**
802  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
803  */
804 void rcu_irq_exit_check_preempt(void)
805 {
806 	lockdep_assert_irqs_disabled();
807 
808 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
809 			 "RCU dynticks_nesting counter underflow/zero!");
810 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
811 			 DYNTICK_IRQ_NONIDLE,
812 			 "Bad RCU  dynticks_nmi_nesting counter\n");
813 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
814 			 "RCU in extended quiescent state!");
815 }
816 #endif /* #ifdef CONFIG_PROVE_RCU */
817 
818 /*
819  * Wrapper for rcu_irq_exit() where interrupts are enabled.
820  *
821  * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
822  * with CONFIG_RCU_EQS_DEBUG=y.
823  */
824 void rcu_irq_exit_irqson(void)
825 {
826 	unsigned long flags;
827 
828 	local_irq_save(flags);
829 	rcu_irq_exit();
830 	local_irq_restore(flags);
831 }
832 
833 /*
834  * Exit an RCU extended quiescent state, which can be either the
835  * idle loop or adaptive-tickless usermode execution.
836  *
837  * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
838  * allow for the possibility of usermode upcalls messing up our count of
839  * interrupt nesting level during the busy period that is just now starting.
840  */
841 static void noinstr rcu_eqs_exit(bool user)
842 {
843 	struct rcu_data *rdp;
844 	long oldval;
845 
846 	lockdep_assert_irqs_disabled();
847 	rdp = this_cpu_ptr(&rcu_data);
848 	oldval = rdp->dynticks_nesting;
849 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
850 	if (oldval) {
851 		// RCU was already watching, so just do accounting and leave.
852 		rdp->dynticks_nesting++;
853 		return;
854 	}
855 	rcu_dynticks_task_exit();
856 	// RCU is not watching here ...
857 	rcu_dynticks_eqs_exit();
858 	// ... but is watching here.
859 	instrumentation_begin();
860 
861 	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
862 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
863 
864 	rcu_cleanup_after_idle();
865 	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
866 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
867 	WRITE_ONCE(rdp->dynticks_nesting, 1);
868 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
869 	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
870 	instrumentation_end();
871 }
872 
873 /**
874  * rcu_idle_exit - inform RCU that current CPU is leaving idle
875  *
876  * Exit idle mode, in other words, -enter- the mode in which RCU
877  * read-side critical sections can occur.
878  *
879  * If you add or remove a call to rcu_idle_exit(), be sure to test with
880  * CONFIG_RCU_EQS_DEBUG=y.
881  */
882 void rcu_idle_exit(void)
883 {
884 	unsigned long flags;
885 
886 	local_irq_save(flags);
887 	rcu_eqs_exit(false);
888 	local_irq_restore(flags);
889 }
890 EXPORT_SYMBOL_GPL(rcu_idle_exit);
891 
892 #ifdef CONFIG_NO_HZ_FULL
893 /**
894  * rcu_user_exit - inform RCU that we are exiting userspace.
895  *
896  * Exit RCU idle mode while entering the kernel because it can
897  * run an RCU read-side critical section anytime.
898  *
899  * If you add or remove a call to rcu_user_exit(), be sure to test with
900  * CONFIG_RCU_EQS_DEBUG=y.
901  */
902 void noinstr rcu_user_exit(void)
903 {
904 	rcu_eqs_exit(1);
905 }
906 
907 /**
908  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
909  *
910  * The scheduler tick is not normally enabled when CPUs enter the kernel
911  * from nohz_full userspace execution.  After all, nohz_full userspace
912  * execution is an RCU quiescent state and the time executing in the kernel
913  * is quite short.  Except of course when it isn't.  And it is not hard to
914  * cause a large system to spend tens of seconds or even minutes looping
915  * in the kernel, which can cause a number of problems, including RCU CPU
916  * stall warnings.
917  *
918  * Therefore, if a nohz_full CPU fails to report a quiescent state
919  * in a timely manner, the RCU grace-period kthread sets that CPU's
920  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
921  * exception will invoke this function, which will turn on the scheduler
922  * tick, which will enable RCU to detect that CPU's quiescent states,
923  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
924  * The tick will be disabled once a quiescent state is reported for
925  * this CPU.
926  *
927  * Of course, in carefully tuned systems, there might never be an
928  * interrupt or exception.  In that case, the RCU grace-period kthread
929  * will eventually cause one to happen.  However, in less carefully
930  * controlled environments, this function allows RCU to get what it
931  * needs without creating otherwise useless interruptions.
932  */
933 void __rcu_irq_enter_check_tick(void)
934 {
935 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
936 
937 	// If we're here from NMI there's nothing to do.
938 	if (in_nmi())
939 		return;
940 
941 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
942 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
943 
944 	if (!tick_nohz_full_cpu(rdp->cpu) ||
945 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
946 	    READ_ONCE(rdp->rcu_forced_tick)) {
947 		// RCU doesn't need nohz_full help from this CPU, or it is
948 		// already getting that help.
949 		return;
950 	}
951 
952 	// We get here only when not in an extended quiescent state and
953 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
954 	// already watching and (2) The fact that we are in an interrupt
955 	// handler and that the rcu_node lock is an irq-disabled lock
956 	// prevents self-deadlock.  So we can safely recheck under the lock.
957 	// Note that the nohz_full state currently cannot change.
958 	raw_spin_lock_rcu_node(rdp->mynode);
959 	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
960 		// A nohz_full CPU is in the kernel and RCU needs a
961 		// quiescent state.  Turn on the tick!
962 		WRITE_ONCE(rdp->rcu_forced_tick, true);
963 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
964 	}
965 	raw_spin_unlock_rcu_node(rdp->mynode);
966 }
967 #endif /* CONFIG_NO_HZ_FULL */
968 
969 /**
970  * rcu_nmi_enter - inform RCU of entry to NMI context
971  *
972  * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
973  * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
974  * that the CPU is active.  This implementation permits nested NMIs, as
975  * long as the nesting level does not overflow an int.  (You will probably
976  * run out of stack space first.)
977  *
978  * If you add or remove a call to rcu_nmi_enter(), be sure to test
979  * with CONFIG_RCU_EQS_DEBUG=y.
980  */
981 noinstr void rcu_nmi_enter(void)
982 {
983 	long incby = 2;
984 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
985 
986 	/* Complain about underflow. */
987 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
988 
989 	/*
990 	 * If idle from RCU viewpoint, atomically increment ->dynticks
991 	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
992 	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
993 	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
994 	 * to be in the outermost NMI handler that interrupted an RCU-idle
995 	 * period (observation due to Andy Lutomirski).
996 	 */
997 	if (rcu_dynticks_curr_cpu_in_eqs()) {
998 
999 		if (!in_nmi())
1000 			rcu_dynticks_task_exit();
1001 
1002 		// RCU is not watching here ...
1003 		rcu_dynticks_eqs_exit();
1004 		// ... but is watching here.
1005 
1006 		if (!in_nmi()) {
1007 			instrumentation_begin();
1008 			rcu_cleanup_after_idle();
1009 			instrumentation_end();
1010 		}
1011 
1012 		instrumentation_begin();
1013 		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1014 		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1015 		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
1016 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1017 
1018 		incby = 1;
1019 	} else if (!in_nmi()) {
1020 		instrumentation_begin();
1021 		rcu_irq_enter_check_tick();
1022 	} else  {
1023 		instrumentation_begin();
1024 	}
1025 
1026 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1027 			  rdp->dynticks_nmi_nesting,
1028 			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1029 	instrumentation_end();
1030 	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1031 		   rdp->dynticks_nmi_nesting + incby);
1032 	barrier();
1033 }
1034 
1035 /**
1036  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1037  *
1038  * Enter an interrupt handler, which might possibly result in exiting
1039  * idle mode, in other words, entering the mode in which read-side critical
1040  * sections can occur.  The caller must have disabled interrupts.
1041  *
1042  * Note that the Linux kernel is fully capable of entering an interrupt
1043  * handler that it never exits, for example when doing upcalls to user mode!
1044  * This code assumes that the idle loop never does upcalls to user mode.
1045  * If your architecture's idle loop does do upcalls to user mode (or does
1046  * anything else that results in unbalanced calls to the irq_enter() and
1047  * irq_exit() functions), RCU will give you what you deserve, good and hard.
1048  * But very infrequently and irreproducibly.
1049  *
1050  * Use things like work queues to work around this limitation.
1051  *
1052  * You have been warned.
1053  *
1054  * If you add or remove a call to rcu_irq_enter(), be sure to test with
1055  * CONFIG_RCU_EQS_DEBUG=y.
1056  */
1057 noinstr void rcu_irq_enter(void)
1058 {
1059 	lockdep_assert_irqs_disabled();
1060 	rcu_nmi_enter();
1061 }
1062 
1063 /*
1064  * Wrapper for rcu_irq_enter() where interrupts are enabled.
1065  *
1066  * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1067  * with CONFIG_RCU_EQS_DEBUG=y.
1068  */
1069 void rcu_irq_enter_irqson(void)
1070 {
1071 	unsigned long flags;
1072 
1073 	local_irq_save(flags);
1074 	rcu_irq_enter();
1075 	local_irq_restore(flags);
1076 }
1077 
1078 /*
1079  * If any sort of urgency was applied to the current CPU (for example,
1080  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1081  * to get to a quiescent state, disable it.
1082  */
1083 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1084 {
1085 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
1086 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
1087 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1088 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1089 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1090 		WRITE_ONCE(rdp->rcu_forced_tick, false);
1091 	}
1092 }
1093 
1094 /**
1095  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1096  *
1097  * Return true if RCU is watching the running CPU, which means that this
1098  * CPU can safely enter RCU read-side critical sections.  In other words,
1099  * if the current CPU is not in its idle loop or is in an interrupt or
1100  * NMI handler, return true.
1101  *
1102  * Make notrace because it can be called by the internal functions of
1103  * ftrace, and making this notrace removes unnecessary recursion calls.
1104  */
1105 notrace bool rcu_is_watching(void)
1106 {
1107 	bool ret;
1108 
1109 	preempt_disable_notrace();
1110 	ret = !rcu_dynticks_curr_cpu_in_eqs();
1111 	preempt_enable_notrace();
1112 	return ret;
1113 }
1114 EXPORT_SYMBOL_GPL(rcu_is_watching);
1115 
1116 /*
1117  * If a holdout task is actually running, request an urgent quiescent
1118  * state from its CPU.  This is unsynchronized, so migrations can cause
1119  * the request to go to the wrong CPU.  Which is OK, all that will happen
1120  * is that the CPU's next context switch will be a bit slower and next
1121  * time around this task will generate another request.
1122  */
1123 void rcu_request_urgent_qs_task(struct task_struct *t)
1124 {
1125 	int cpu;
1126 
1127 	barrier();
1128 	cpu = task_cpu(t);
1129 	if (!task_curr(t))
1130 		return; /* This task is not running on that CPU. */
1131 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1132 }
1133 
1134 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1135 
1136 /*
1137  * Is the current CPU online as far as RCU is concerned?
1138  *
1139  * Disable preemption to avoid false positives that could otherwise
1140  * happen due to the current CPU number being sampled, this task being
1141  * preempted, its old CPU being taken offline, resuming on some other CPU,
1142  * then determining that its old CPU is now offline.
1143  *
1144  * Disable checking if in an NMI handler because we cannot safely
1145  * report errors from NMI handlers anyway.  In addition, it is OK to use
1146  * RCU on an offline processor during initial boot, hence the check for
1147  * rcu_scheduler_fully_active.
1148  */
1149 bool rcu_lockdep_current_cpu_online(void)
1150 {
1151 	struct rcu_data *rdp;
1152 	struct rcu_node *rnp;
1153 	bool ret = false;
1154 
1155 	if (in_nmi() || !rcu_scheduler_fully_active)
1156 		return true;
1157 	preempt_disable_notrace();
1158 	rdp = this_cpu_ptr(&rcu_data);
1159 	rnp = rdp->mynode;
1160 	if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
1161 		ret = true;
1162 	preempt_enable_notrace();
1163 	return ret;
1164 }
1165 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1166 
1167 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1168 
1169 /*
1170  * We are reporting a quiescent state on behalf of some other CPU, so
1171  * it is our responsibility to check for and handle potential overflow
1172  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1173  * After all, the CPU might be in deep idle state, and thus executing no
1174  * code whatsoever.
1175  */
1176 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1177 {
1178 	raw_lockdep_assert_held_rcu_node(rnp);
1179 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1180 			 rnp->gp_seq))
1181 		WRITE_ONCE(rdp->gpwrap, true);
1182 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1183 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
1184 }
1185 
1186 /*
1187  * Snapshot the specified CPU's dynticks counter so that we can later
1188  * credit them with an implicit quiescent state.  Return 1 if this CPU
1189  * is in dynticks idle mode, which is an extended quiescent state.
1190  */
1191 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1192 {
1193 	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1194 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1195 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1196 		rcu_gpnum_ovf(rdp->mynode, rdp);
1197 		return 1;
1198 	}
1199 	return 0;
1200 }
1201 
1202 /*
1203  * Return true if the specified CPU has passed through a quiescent
1204  * state by virtue of being in or having passed through a dynticks
1205  * idle state since the last call to dyntick_save_progress_counter()
1206  * for this same CPU, or by virtue of having been offline.
1207  */
1208 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1209 {
1210 	unsigned long jtsq;
1211 	bool *rnhqp;
1212 	bool *ruqp;
1213 	struct rcu_node *rnp = rdp->mynode;
1214 
1215 	/*
1216 	 * If the CPU passed through or entered a dynticks idle phase with
1217 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
1218 	 * already acknowledged the request to pass through a quiescent
1219 	 * state.  Either way, that CPU cannot possibly be in an RCU
1220 	 * read-side critical section that started before the beginning
1221 	 * of the current RCU grace period.
1222 	 */
1223 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1224 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1225 		rcu_gpnum_ovf(rnp, rdp);
1226 		return 1;
1227 	}
1228 
1229 	/*
1230 	 * Complain if a CPU that is considered to be offline from RCU's
1231 	 * perspective has not yet reported a quiescent state.  After all,
1232 	 * the offline CPU should have reported a quiescent state during
1233 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
1234 	 * if it ran concurrently with either the CPU going offline or the
1235 	 * last task on a leaf rcu_node structure exiting its RCU read-side
1236 	 * critical section while all CPUs corresponding to that structure
1237 	 * are offline.  This added warning detects bugs in any of these
1238 	 * code paths.
1239 	 *
1240 	 * The rcu_node structure's ->lock is held here, which excludes
1241  * the relevant portions of the CPU-hotplug code, the grace-period
1242 	 * initialization code, and the rcu_read_unlock() code paths.
1243 	 *
1244 	 * For more detail, please refer to the "Hotplug CPU" section
1245 	 * of RCU's Requirements documentation.
1246 	 */
1247 	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1248 		bool onl;
1249 		struct rcu_node *rnp1;
1250 
1251 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1252 			__func__, rnp->grplo, rnp->grphi, rnp->level,
1253 			(long)rnp->gp_seq, (long)rnp->completedqs);
1254 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1255 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1256 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1257 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1258 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1259 			__func__, rdp->cpu, ".o"[onl],
1260 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1261 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1262 		return 1; /* Break things loose after complaining. */
1263 	}
1264 
1265 	/*
1266 	 * A CPU running for an extended time within the kernel can
1267 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1268 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1269 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
1270 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1271 	 * variable are safe because the assignments are repeated if this
1272 	 * CPU failed to pass through a quiescent state.  This code
1273 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
1274 	 * is set way high.
1275 	 */
1276 	jtsq = READ_ONCE(jiffies_to_sched_qs);
1277 	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1278 	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1279 	if (!READ_ONCE(*rnhqp) &&
1280 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1281 	     time_after(jiffies, rcu_state.jiffies_resched) ||
1282 	     rcu_state.cbovld)) {
1283 		WRITE_ONCE(*rnhqp, true);
1284 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1285 		smp_store_release(ruqp, true);
1286 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1287 		WRITE_ONCE(*ruqp, true);
1288 	}
1289 
1290 	/*
1291 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1292 	 * The above code handles this, but only for straight cond_resched().
1293 	 * And some in-kernel loops check need_resched() before calling
1294 	 * cond_resched(), which defeats the above code for CPUs that are
1295 	 * running in-kernel with scheduling-clock interrupts disabled.
1296 	 * So hit them over the head with the resched_cpu() hammer!
1297 	 */
1298 	if (tick_nohz_full_cpu(rdp->cpu) &&
1299 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1300 	     rcu_state.cbovld)) {
1301 		WRITE_ONCE(*ruqp, true);
1302 		resched_cpu(rdp->cpu);
1303 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1304 	}
1305 
1306 	/*
1307 	 * If more than halfway to RCU CPU stall-warning time, invoke
1308 	 * resched_cpu() more frequently to try to loosen things up a bit.
1309 	 * Also check to see if the CPU is getting hammered with interrupts,
1310 	 * but only once per grace period, just to keep the IPIs down to
1311 	 * a dull roar.
1312 	 */
1313 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
1314 		if (time_after(jiffies,
1315 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1316 			resched_cpu(rdp->cpu);
1317 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1318 		}
1319 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1320 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1321 		    (rnp->ffmask & rdp->grpmask)) {
1322 			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1323 			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
1324 			rdp->rcu_iw_pending = true;
1325 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
1326 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1327 		}
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1334 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1335 			      unsigned long gp_seq_req, const char *s)
1336 {
1337 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1338 				      gp_seq_req, rnp->level,
1339 				      rnp->grplo, rnp->grphi, s);
1340 }
1341 
1342 /*
1343  * rcu_start_this_gp - Request the start of a particular grace period
1344  * @rnp_start: The leaf node of the CPU from which to start.
1345  * @rdp: The rcu_data corresponding to the CPU from which to start.
1346  * @gp_seq_req: The gp_seq of the grace period to start.
1347  *
1348  * Start the specified grace period, as needed to handle newly arrived
1349  * callbacks.  The required future grace periods are recorded in each
1350  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
1351  * is reason to awaken the grace-period kthread.
1352  *
1353  * The caller must hold the specified rcu_node structure's ->lock, which
1354  * is why the caller is responsible for waking the grace-period kthread.
1355  *
1356  * Returns true if the GP thread needs to be awakened else false.
1357  */
1358 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1359 			      unsigned long gp_seq_req)
1360 {
1361 	bool ret = false;
1362 	struct rcu_node *rnp;
1363 
1364 	/*
1365 	 * Use funnel locking to either acquire the root rcu_node
1366 	 * structure's lock or bail out if the need for this grace period
1367 	 * has already been recorded -- or if that grace period has in
1368 	 * fact already started.  If there is already a grace period in
1369 	 * progress in a non-leaf node, no recording is needed because the
1370 	 * end of the grace period will scan the leaf rcu_node structures.
1371 	 * Note that rnp_start->lock must not be released.
1372 	 */
1373 	raw_lockdep_assert_held_rcu_node(rnp_start);
1374 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1375 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
1376 		if (rnp != rnp_start)
1377 			raw_spin_lock_rcu_node(rnp);
1378 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1379 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1380 		    (rnp != rnp_start &&
1381 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1382 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1383 					  TPS("Prestarted"));
1384 			goto unlock_out;
1385 		}
1386 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1387 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1388 			/*
1389 			 * We just marked the leaf or internal node, and a
1390 			 * grace period is in progress, which means that
1391 			 * rcu_gp_cleanup() will see the marking.  Bail to
1392 			 * reduce contention.
1393 			 */
1394 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1395 					  TPS("Startedleaf"));
1396 			goto unlock_out;
1397 		}
1398 		if (rnp != rnp_start && rnp->parent != NULL)
1399 			raw_spin_unlock_rcu_node(rnp);
1400 		if (!rnp->parent)
1401 			break;  /* At root, and perhaps also leaf. */
1402 	}
1403 
1404 	/* If GP already in progress, just leave, otherwise start one. */
1405 	if (rcu_gp_in_progress()) {
1406 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1407 		goto unlock_out;
1408 	}
1409 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1410 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1411 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1412 	if (!READ_ONCE(rcu_state.gp_kthread)) {
1413 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1414 		goto unlock_out;
1415 	}
1416 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1417 	ret = true;  /* Caller must wake GP kthread. */
1418 unlock_out:
1419 	/* Push furthest requested GP to leaf node and rcu_data structure. */
1420 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1421 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1422 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1423 	}
1424 	if (rnp != rnp_start)
1425 		raw_spin_unlock_rcu_node(rnp);
1426 	return ret;
1427 }
1428 
1429 /*
1430  * Clean up any old requests for the just-ended grace period.  Also return
1431  * whether any additional grace periods have been requested.
1432  */
1433 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1434 {
1435 	bool needmore;
1436 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1437 
1438 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1439 	if (!needmore)
1440 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1441 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1442 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1443 	return needmore;
1444 }
1445 
1446 /*
1447  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1448  * interrupt or softirq handler, in which case we just might immediately
1449  * sleep upon return, resulting in a grace-period hang), and don't bother
1450  * awakening when there is nothing for the grace-period kthread to do
1451  * (as in several CPUs raced to awaken, we lost), and finally don't try
1452  * to awaken a kthread that has not yet been created.  If all those checks
1453  * are passed, track some debug information and awaken.
1454  *
1455  * So why do the self-wakeup when in an interrupt or softirq handler
1456  * in the grace-period kthread's context?  Because the kthread might have
1457  * been interrupted just as it was going to sleep, and just after the final
1458  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1459  * is required, and is therefore supplied.
1460  */
1461 static void rcu_gp_kthread_wake(void)
1462 {
1463 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1464 
1465 	if ((current == t && !in_irq() && !in_serving_softirq()) ||
1466 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1467 		return;
1468 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1469 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1470 	swake_up_one(&rcu_state.gp_wq);
1471 }
1472 
1473 /*
1474  * If there is room, assign a ->gp_seq number to any callbacks on this
1475  * CPU that have not already been assigned.  Also accelerate any callbacks
1476  * that were previously assigned a ->gp_seq number that has since proven
1477  * to be too conservative, which can happen if callbacks get assigned a
1478  * ->gp_seq number while RCU is idle, but with reference to a non-root
1479  * rcu_node structure.  This function is idempotent, so it does not hurt
1480  * to call it repeatedly.  Returns a flag saying whether we should awaken
1481  * the RCU grace-period kthread.
1482  *
1483  * The caller must hold rnp->lock with interrupts disabled.
1484  */
1485 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1486 {
1487 	unsigned long gp_seq_req;
1488 	bool ret = false;
1489 
1490 	rcu_lockdep_assert_cblist_protected(rdp);
1491 	raw_lockdep_assert_held_rcu_node(rnp);
1492 
1493 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1494 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1495 		return false;
1496 
1497 	/*
1498 	 * Callbacks are often registered with incomplete grace-period
1499 	 * information.  Something about the fact that getting exact
1500 	 * information requires acquiring a global lock...  RCU therefore
1501 	 * makes a conservative estimate of the grace period number at which
1502 	 * a given callback will become ready to invoke.	The following
1503 	 * code checks this estimate and improves it when possible, thus
1504 	 * accelerating callback invocation to an earlier grace-period
1505 	 * number.
1506 	 */
1507 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1508 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1509 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1510 
1511 	/* Trace depending on how much we were able to accelerate. */
1512 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1513 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1514 	else
1515 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1516 
1517 	return ret;
1518 }
1519 
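/*
 * For reference: a ->gp_seq value packs the grace-period count into its
 * upper bits and the current phase into its low-order RCU_SEQ_CTR_SHIFT
 * bits, so rcu_seq_snap() above yields the earliest sequence number at
 * which a full grace period is guaranteed to have elapsed since the
 * snapshot, and rcu_seq_state() extracts the phase.  The helpers live
 * in kernel/rcu/rcu.h.
 */
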
1520 /*
1521  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1522  * rcu_node structure's ->lock be held.  It consults the cached value
1523  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1524  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1525  * while holding the leaf rcu_node structure's ->lock.
1526  */
1527 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1528 					struct rcu_data *rdp)
1529 {
1530 	unsigned long c;
1531 	bool needwake;
1532 
1533 	rcu_lockdep_assert_cblist_protected(rdp);
1534 	c = rcu_seq_snap(&rcu_state.gp_seq);
1535 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1536 		/* Old request still live, so mark recent callbacks. */
1537 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1538 		return;
1539 	}
1540 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1541 	needwake = rcu_accelerate_cbs(rnp, rdp);
1542 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1543 	if (needwake)
1544 		rcu_gp_kthread_wake();
1545 }
1546 
1547 /*
1548  * Move any callbacks whose grace period has completed to the
1549  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1550  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1551  * sublist.  This function is idempotent, so it does not hurt to
1552  * invoke it repeatedly.  As long as it is not invoked -too- often...
1553  * Returns true if the RCU grace-period kthread needs to be awakened.
1554  *
1555  * The caller must hold rnp->lock with interrupts disabled.
1556  */
1557 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1558 {
1559 	rcu_lockdep_assert_cblist_protected(rdp);
1560 	raw_lockdep_assert_held_rcu_node(rnp);
1561 
1562 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1563 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1564 		return false;
1565 
1566 	/*
1567 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1568 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1569 	 */
1570 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1571 
1572 	/* Classify any remaining callbacks. */
1573 	return rcu_accelerate_cbs(rnp, rdp);
1574 }
1575 
1576 /*
1577  * Move and classify callbacks, but only if doing so won't require
1578  * that the RCU grace-period kthread be awakened.
1579  */
1580 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1581 						  struct rcu_data *rdp)
1582 {
1583 	rcu_lockdep_assert_cblist_protected(rdp);
1584 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1585 		return;
1586 	// The grace period cannot end while we hold the rcu_node lock.
1587 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1588 		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1589 	raw_spin_unlock_rcu_node(rnp);
1590 }
1591 
1592 /*
1593  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1594  * quiescent state.  This is intended to be invoked when the CPU notices
1595  * a new grace period.
1596  */
1597 static void rcu_strict_gp_check_qs(void)
1598 {
1599 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1600 		rcu_read_lock();
1601 		rcu_read_unlock();
1602 	}
1603 }
1604 
1605 /*
1606  * Update CPU-local rcu_data state to record the beginnings and ends of
1607  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1608  * structure corresponding to the current CPU, and must have irqs disabled.
1609  * Returns true if the grace-period kthread needs to be awakened.
1610  */
1611 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1612 {
1613 	bool ret = false;
1614 	bool need_qs;
1615 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1616 			       rcu_segcblist_is_offloaded(&rdp->cblist);
1617 
1618 	raw_lockdep_assert_held_rcu_node(rnp);
1619 
1620 	if (rdp->gp_seq == rnp->gp_seq)
1621 		return false; /* Nothing to do. */
1622 
1623 	/* Handle the ends of any preceding grace periods first. */
1624 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1625 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1626 		if (!offloaded)
1627 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1628 		rdp->core_needs_qs = false;
1629 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1630 	} else {
1631 		if (!offloaded)
1632 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1633 		if (rdp->core_needs_qs)
1634 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1635 	}
1636 
1637 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1638 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1639 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1640 		/*
1641 		 * If the current grace period is waiting for this CPU,
1642 		 * set up to detect a quiescent state, otherwise don't
1643 		 * go looking for one.
1644 		 */
1645 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1646 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1647 		rdp->cpu_no_qs.b.norm = need_qs;
1648 		rdp->core_needs_qs = need_qs;
1649 		zero_cpu_stall_ticks(rdp);
1650 	}
1651 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1652 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1653 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1654 	WRITE_ONCE(rdp->gpwrap, false);
1655 	rcu_gpnum_ovf(rnp, rdp);
1656 	return ret;
1657 }
1658 
1659 static void note_gp_changes(struct rcu_data *rdp)
1660 {
1661 	unsigned long flags;
1662 	bool needwake;
1663 	struct rcu_node *rnp;
1664 
1665 	local_irq_save(flags);
1666 	rnp = rdp->mynode;
1667 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1668 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1669 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1670 		local_irq_restore(flags);
1671 		return;
1672 	}
1673 	needwake = __note_gp_changes(rnp, rdp);
1674 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1675 	rcu_strict_gp_check_qs();
1676 	if (needwake)
1677 		rcu_gp_kthread_wake();
1678 }
1679 
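/*
 * note_gp_changes() is the cheap per-CPU entry point: it bails without
 * taking the leaf rcu_node lock when nothing has changed, and otherwise
 * uses a trylock, so contention simply defers the update to a later
 * invocation (for example, from the next pass through rcu_core()).
 */
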
1680 static void rcu_gp_slow(int delay)
1681 {
1682 	if (delay > 0 &&
1683 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1684 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1685 		schedule_timeout_idle(delay);
1686 }
1687 
1688 static unsigned long sleep_duration;
1689 
1690 /* Allow rcutorture to stall the grace-period kthread. */
1691 void rcu_gp_set_torture_wait(int duration)
1692 {
1693 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1694 		WRITE_ONCE(sleep_duration, duration);
1695 }
1696 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1697 
1698 /* Actually implement the aforementioned wait. */
1699 static void rcu_gp_torture_wait(void)
1700 {
1701 	unsigned long duration;
1702 
1703 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1704 		return;
1705 	duration = xchg(&sleep_duration, 0UL);
1706 	if (duration > 0) {
1707 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1708 		schedule_timeout_idle(duration);
1709 		pr_alert("%s: Wait complete\n", __func__);
1710 	}
1711 }
1712 
1713 /*
1714  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1715  * processing.
1716  */
1717 static void rcu_strict_gp_boundary(void *unused)
1718 {
1719 	invoke_rcu_core();
1720 }
1721 
1722 /*
1723  * Initialize a new grace period.  Return false if no grace period required.
1724  */
1725 static bool rcu_gp_init(void)
1726 {
1727 	unsigned long firstseq;
1728 	unsigned long flags;
1729 	unsigned long oldmask;
1730 	unsigned long mask;
1731 	struct rcu_data *rdp;
1732 	struct rcu_node *rnp = rcu_get_root();
1733 
1734 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1735 	raw_spin_lock_irq_rcu_node(rnp);
1736 	if (!READ_ONCE(rcu_state.gp_flags)) {
1737 		/* Spurious wakeup, tell caller to go back to sleep.  */
1738 		raw_spin_unlock_irq_rcu_node(rnp);
1739 		return false;
1740 	}
1741 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1742 
1743 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1744 		/*
1745 		 * Grace period already in progress, don't start another.
1746 		 * Not supposed to be able to happen.
1747 		 */
1748 		raw_spin_unlock_irq_rcu_node(rnp);
1749 		return false;
1750 	}
1751 
1752 	/* Advance to a new grace period and initialize state. */
1753 	record_gp_stall_check_time();
1754 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1755 	rcu_seq_start(&rcu_state.gp_seq);
1756 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1757 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1758 	raw_spin_unlock_irq_rcu_node(rnp);
1759 
1760 	/*
1761 	 * Apply per-leaf buffered online and offline operations to
1762 	 * the rcu_node tree. Note that this new grace period need not
1763 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1764 	 * offlining path, when combined with checks in this function,
1765 	 * will handle CPUs that are currently going offline or that will
1766 	 * go offline later.  Please also refer to "Hotplug CPU" section
1767 	 * of RCU's Requirements documentation.
1768 	 */
1769 	rcu_state.gp_state = RCU_GP_ONOFF;
1770 	rcu_for_each_leaf_node(rnp) {
1771 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1772 		firstseq = READ_ONCE(rnp->ofl_seq);
1773 		if (firstseq & 0x1)
1774 			while (firstseq == READ_ONCE(rnp->ofl_seq))
1775 				schedule_timeout_idle(1);  // Can't wake unless RCU is watching.
1776 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1777 		raw_spin_lock(&rcu_state.ofl_lock);
1778 		raw_spin_lock_irq_rcu_node(rnp);
1779 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1780 		    !rnp->wait_blkd_tasks) {
1781 			/* Nothing to do on this leaf rcu_node structure. */
1782 			raw_spin_unlock_irq_rcu_node(rnp);
1783 			raw_spin_unlock(&rcu_state.ofl_lock);
1784 			continue;
1785 		}
1786 
1787 		/* Record old state, apply changes to ->qsmaskinit field. */
1788 		oldmask = rnp->qsmaskinit;
1789 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1790 
1791 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1792 		if (!oldmask != !rnp->qsmaskinit) {
1793 			if (!oldmask) { /* First online CPU for rcu_node. */
1794 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1795 					rcu_init_new_rnp(rnp);
1796 			} else if (rcu_preempt_has_tasks(rnp)) {
1797 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1798 			} else { /* Last offline CPU and can propagate. */
1799 				rcu_cleanup_dead_rnp(rnp);
1800 			}
1801 		}
1802 
1803 		/*
1804 		 * If all waited-on tasks from prior grace period are
1805 		 * done, and if all this rcu_node structure's CPUs are
1806 		 * still offline, propagate up the rcu_node tree and
1807 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1808 		 * rcu_node structure's CPUs has since come back online,
1809 		 * simply clear ->wait_blkd_tasks.
1810 		 */
1811 		if (rnp->wait_blkd_tasks &&
1812 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1813 			rnp->wait_blkd_tasks = false;
1814 			if (!rnp->qsmaskinit)
1815 				rcu_cleanup_dead_rnp(rnp);
1816 		}
1817 
1818 		raw_spin_unlock_irq_rcu_node(rnp);
1819 		raw_spin_unlock(&rcu_state.ofl_lock);
1820 	}
1821 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1822 
1823 	/*
1824 	 * Set the quiescent-state-needed bits in all the rcu_node
1825 	 * structures for all currently online CPUs in breadth-first
1826 	 * order, starting from the root rcu_node structure, relying on the
1827 	 * layout of the tree within the rcu_state.node[] array.  Note that
1828 	 * other CPUs will access only the leaves of the hierarchy, thus
1829 	 * seeing that no grace period is in progress, at least until the
1830 	 * corresponding leaf node has been initialized.
1831 	 *
1832 	 * The grace period cannot complete until the initialization
1833 	 * process finishes, because this kthread handles both.
1834 	 */
1835 	rcu_state.gp_state = RCU_GP_INIT;
1836 	rcu_for_each_node_breadth_first(rnp) {
1837 		rcu_gp_slow(gp_init_delay);
1838 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1839 		rdp = this_cpu_ptr(&rcu_data);
1840 		rcu_preempt_check_blocked_tasks(rnp);
1841 		rnp->qsmask = rnp->qsmaskinit;
1842 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1843 		if (rnp == rdp->mynode)
1844 			(void)__note_gp_changes(rnp, rdp);
1845 		rcu_preempt_boost_start_gp(rnp);
1846 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1847 					    rnp->level, rnp->grplo,
1848 					    rnp->grphi, rnp->qsmask);
1849 		/* Quiescent states for tasks on any now-offline CPUs. */
1850 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1851 		rnp->rcu_gp_init_mask = mask;
1852 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1853 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1854 		else
1855 			raw_spin_unlock_irq_rcu_node(rnp);
1856 		cond_resched_tasks_rcu_qs();
1857 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1858 	}
1859 
1860 	// If strict, make all CPUs aware of new grace period.
1861 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1862 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1863 
1864 	return true;
1865 }
1866 
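/*
 * To summarize rcu_gp_init(): initialization runs in three phases.
 * First, ->gp_flags is consumed and ->gp_seq advanced under the root
 * rcu_node lock.  Second (RCU_GP_ONOFF), buffered CPU online/offline
 * transitions are folded from ->qsmaskinitnext into ->qsmaskinit for
 * each leaf, propagating any zero/nonzero change up the tree.  Third
 * (RCU_GP_INIT), the new ->gp_seq and ->qsmask values are published
 * breadth-first so that no CPU sees a leaf rcu_node structure
 * initialized ahead of its ancestors.
 */
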
1867 /*
1868  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1869  * time.
1870  */
1871 static bool rcu_gp_fqs_check_wake(int *gfp)
1872 {
1873 	struct rcu_node *rnp = rcu_get_root();
1874 
1875 	// If under overload conditions, force an immediate FQS scan.
1876 	if (*gfp & RCU_GP_FLAG_OVLD)
1877 		return true;
1878 
1879 	// Someone like call_rcu() requested a force-quiescent-state scan.
1880 	*gfp = READ_ONCE(rcu_state.gp_flags);
1881 	if (*gfp & RCU_GP_FLAG_FQS)
1882 		return true;
1883 
1884 	// The current grace period has completed.
1885 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1886 		return true;
1887 
1888 	return false;
1889 }
1890 
1891 /*
1892  * Do one round of quiescent-state forcing.
1893  */
1894 static void rcu_gp_fqs(bool first_time)
1895 {
1896 	struct rcu_node *rnp = rcu_get_root();
1897 
1898 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1899 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1900 	if (first_time) {
1901 		/* Collect dyntick-idle snapshots. */
1902 		force_qs_rnp(dyntick_save_progress_counter);
1903 	} else {
1904 		/* Handle dyntick-idle and offline CPUs. */
1905 		force_qs_rnp(rcu_implicit_dynticks_qs);
1906 	}
1907 	/* Clear flag to prevent immediate re-entry. */
1908 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1909 		raw_spin_lock_irq_rcu_node(rnp);
1910 		WRITE_ONCE(rcu_state.gp_flags,
1911 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1912 		raw_spin_unlock_irq_rcu_node(rnp);
1913 	}
1914 }
1915 
1916 /*
1917  * Loop doing repeated quiescent-state forcing until the grace period ends.
1918  */
1919 static void rcu_gp_fqs_loop(void)
1920 {
1921 	bool first_gp_fqs;
1922 	int gf = 0;
1923 	unsigned long j;
1924 	int ret;
1925 	struct rcu_node *rnp = rcu_get_root();
1926 
1927 	first_gp_fqs = true;
1928 	j = READ_ONCE(jiffies_till_first_fqs);
1929 	if (rcu_state.cbovld)
1930 		gf = RCU_GP_FLAG_OVLD;
1931 	ret = 0;
1932 	for (;;) {
1933 		if (!ret) {
1934 			rcu_state.jiffies_force_qs = jiffies + j;
1935 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1936 				   jiffies + (j ? 3 * j : 2));
1937 		}
1938 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1939 				       TPS("fqswait"));
1940 		rcu_state.gp_state = RCU_GP_WAIT_FQS;
1941 		ret = swait_event_idle_timeout_exclusive(
1942 				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1943 		rcu_gp_torture_wait();
1944 		rcu_state.gp_state = RCU_GP_DOING_FQS;
1945 		/* Locking provides needed memory barriers. */
1946 		/* If grace period done, leave loop. */
1947 		if (!READ_ONCE(rnp->qsmask) &&
1948 		    !rcu_preempt_blocked_readers_cgp(rnp))
1949 			break;
1950 		/* If time for quiescent-state forcing, do it. */
1951 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1952 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1953 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1954 					       TPS("fqsstart"));
1955 			rcu_gp_fqs(first_gp_fqs);
1956 			gf = 0;
1957 			if (first_gp_fqs) {
1958 				first_gp_fqs = false;
1959 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1960 			}
1961 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1962 					       TPS("fqsend"));
1963 			cond_resched_tasks_rcu_qs();
1964 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1965 			ret = 0; /* Force full wait till next FQS. */
1966 			j = READ_ONCE(jiffies_till_next_fqs);
1967 		} else {
1968 			/* Deal with stray signal. */
1969 			cond_resched_tasks_rcu_qs();
1970 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1971 			WARN_ON(signal_pending(current));
1972 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1973 					       TPS("fqswaitsig"));
1974 			ret = 1; /* Keep old FQS timing. */
1975 			j = jiffies;
1976 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1977 				j = 1;
1978 			else
1979 				j = rcu_state.jiffies_force_qs - j;
1980 			gf = 0;
1981 		}
1982 	}
1983 }
1984 
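/*
 * Timing note for rcu_gp_fqs_loop(): the first wait uses
 * jiffies_till_first_fqs and later waits use jiffies_till_next_fqs.
 * After an actual forcing pass, ret is cleared so the next wait runs
 * for the full interval; a wait cut short by a stray signal instead
 * keeps the old deadline and sleeps only for the time remaining.
 */
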
1985 /*
1986  * Clean up after the old grace period.
1987  */
1988 static void rcu_gp_cleanup(void)
1989 {
1990 	int cpu;
1991 	bool needgp = false;
1992 	unsigned long gp_duration;
1993 	unsigned long new_gp_seq;
1994 	bool offloaded;
1995 	struct rcu_data *rdp;
1996 	struct rcu_node *rnp = rcu_get_root();
1997 	struct swait_queue_head *sq;
1998 
1999 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2000 	raw_spin_lock_irq_rcu_node(rnp);
2001 	rcu_state.gp_end = jiffies;
2002 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2003 	if (gp_duration > rcu_state.gp_max)
2004 		rcu_state.gp_max = gp_duration;
2005 
2006 	/*
2007 	 * We know the grace period is complete, but to everyone else
2008 	 * it appears to still be ongoing.  But it is also the case
2009 	 * that to everyone else it looks like there is nothing that
2010 	 * they can do to advance the grace period.  It is therefore
2011 	 * safe for us to drop the lock in order to mark the grace
2012 	 * period as completed in all of the rcu_node structures.
2013 	 */
2014 	raw_spin_unlock_irq_rcu_node(rnp);
2015 
2016 	/*
2017 	 * Propagate new ->gp_seq value to rcu_node structures so that
2018 	 * other CPUs don't have to wait until the start of the next grace
2019 	 * period to process their callbacks.  This also avoids some nasty
2020 	 * RCU grace-period initialization races by forcing the end of
2021 	 * the current grace period to be completely recorded in all of
2022 	 * the rcu_node structures before the beginning of the next grace
2023 	 * period is recorded in any of the rcu_node structures.
2024 	 */
2025 	new_gp_seq = rcu_state.gp_seq;
2026 	rcu_seq_end(&new_gp_seq);
2027 	rcu_for_each_node_breadth_first(rnp) {
2028 		raw_spin_lock_irq_rcu_node(rnp);
2029 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2030 			dump_blkd_tasks(rnp, 10);
2031 		WARN_ON_ONCE(rnp->qsmask);
2032 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2033 		rdp = this_cpu_ptr(&rcu_data);
2034 		if (rnp == rdp->mynode)
2035 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2036 		/* smp_mb() provided by prior unlock-lock pair. */
2037 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2038 		// Reset overload indication for CPUs no longer overloaded
2039 		if (rcu_is_leaf_node(rnp))
2040 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2041 				rdp = per_cpu_ptr(&rcu_data, cpu);
2042 				check_cb_ovld_locked(rdp, rnp);
2043 			}
2044 		sq = rcu_nocb_gp_get(rnp);
2045 		raw_spin_unlock_irq_rcu_node(rnp);
2046 		rcu_nocb_gp_cleanup(sq);
2047 		cond_resched_tasks_rcu_qs();
2048 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2049 		rcu_gp_slow(gp_cleanup_delay);
2050 	}
2051 	rnp = rcu_get_root();
2052 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2053 
2054 	/* Declare grace period done, trace first to use old GP number. */
2055 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2056 	rcu_seq_end(&rcu_state.gp_seq);
2057 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2058 	rcu_state.gp_state = RCU_GP_IDLE;
2059 	/* Check for GP requests since above loop. */
2060 	rdp = this_cpu_ptr(&rcu_data);
2061 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2062 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2063 				  TPS("CleanupMore"));
2064 		needgp = true;
2065 	}
2066 	/* Advance CBs to reduce false positives below. */
2067 	offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2068 		    rcu_segcblist_is_offloaded(&rdp->cblist);
2069 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2070 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2071 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2072 		trace_rcu_grace_period(rcu_state.name,
2073 				       rcu_state.gp_seq,
2074 				       TPS("newreq"));
2075 	} else {
2076 		WRITE_ONCE(rcu_state.gp_flags,
2077 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2078 	}
2079 	raw_spin_unlock_irq_rcu_node(rnp);
2080 
2081 	// If strict, make all CPUs aware of the end of the old grace period.
2082 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2083 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2084 }
2085 
2086 /*
2087  * Body of kthread that handles grace periods.
2088  */
2089 static int __noreturn rcu_gp_kthread(void *unused)
2090 {
2091 	rcu_bind_gp_kthread();
2092 	for (;;) {
2093 
2094 		/* Handle grace-period start. */
2095 		for (;;) {
2096 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2097 					       TPS("reqwait"));
2098 			rcu_state.gp_state = RCU_GP_WAIT_GPS;
2099 			swait_event_idle_exclusive(rcu_state.gp_wq,
2100 					 READ_ONCE(rcu_state.gp_flags) &
2101 					 RCU_GP_FLAG_INIT);
2102 			rcu_gp_torture_wait();
2103 			rcu_state.gp_state = RCU_GP_DONE_GPS;
2104 			/* Locking provides needed memory barrier. */
2105 			if (rcu_gp_init())
2106 				break;
2107 			cond_resched_tasks_rcu_qs();
2108 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2109 			WARN_ON(signal_pending(current));
2110 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2111 					       TPS("reqwaitsig"));
2112 		}
2113 
2114 		/* Handle quiescent-state forcing. */
2115 		rcu_gp_fqs_loop();
2116 
2117 		/* Handle grace-period end. */
2118 		rcu_state.gp_state = RCU_GP_CLEANUP;
2119 		rcu_gp_cleanup();
2120 		rcu_state.gp_state = RCU_GP_CLEANED;
2121 	}
2122 }
2123 
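/*
 * The kthread above cycles through the RCU_GP_* states: RCU_GP_WAIT_GPS
 * (sleeping until RCU_GP_FLAG_INIT is set), rcu_gp_init(), the
 * RCU_GP_WAIT_FQS/RCU_GP_DOING_FQS loop in rcu_gp_fqs_loop(), and
 * finally RCU_GP_CLEANUP/RCU_GP_CLEANED via rcu_gp_cleanup().  These
 * state names are also what the grace-period stall diagnostics print
 * when the kthread appears to be stuck.
 */
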
2124 /*
2125  * Report a full set of quiescent states to the rcu_state data structure.
2126  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2127  * another grace period is required.  Whether we wake the grace-period
2128  * kthread or it awakens itself for the next round of quiescent-state
2129  * forcing, that kthread will clean up after the just-completed grace
2130  * period.  Note that the caller must hold rnp->lock, which is released
2131  * before return.
2132  */
2133 static void rcu_report_qs_rsp(unsigned long flags)
2134 	__releases(rcu_get_root()->lock)
2135 {
2136 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2137 	WARN_ON_ONCE(!rcu_gp_in_progress());
2138 	WRITE_ONCE(rcu_state.gp_flags,
2139 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2140 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2141 	rcu_gp_kthread_wake();
2142 }
2143 
2144 /*
2145  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2146  * Allows quiescent states for a group of CPUs to be reported at one go
2147  * to the specified rcu_node structure, though all the CPUs in the group
2148  * must be represented by the same rcu_node structure (which need not be a
2149  * leaf rcu_node structure, though it often will be).  The gps parameter
2150  * is the grace-period snapshot, which means that the quiescent states
2151  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2152  * must be held upon entry, and it is released before return.
2153  *
2154  * As a special case, if mask is zero, the bit-already-cleared check is
2155  * disabled.  This allows propagating quiescent state due to resumed tasks
2156  * during grace-period initialization.
2157  */
2158 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2159 			      unsigned long gps, unsigned long flags)
2160 	__releases(rnp->lock)
2161 {
2162 	unsigned long oldmask = 0;
2163 	struct rcu_node *rnp_c;
2164 
2165 	raw_lockdep_assert_held_rcu_node(rnp);
2166 
2167 	/* Walk up the rcu_node hierarchy. */
2168 	for (;;) {
2169 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2170 
2171 			/*
2172 			 * Our bit has already been cleared, or the
2173 			 * relevant grace period is already over, so done.
2174 			 */
2175 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2176 			return;
2177 		}
2178 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2179 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2180 			     rcu_preempt_blocked_readers_cgp(rnp));
2181 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2182 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2183 						 mask, rnp->qsmask, rnp->level,
2184 						 rnp->grplo, rnp->grphi,
2185 						 !!rnp->gp_tasks);
2186 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2187 
2188 			/* Other bits still set at this level, so done. */
2189 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2190 			return;
2191 		}
2192 		rnp->completedqs = rnp->gp_seq;
2193 		mask = rnp->grpmask;
2194 		if (rnp->parent == NULL) {
2195 
2196 			/* No more levels.  Exit loop holding root lock. */
2197 
2198 			break;
2199 		}
2200 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2201 		rnp_c = rnp;
2202 		rnp = rnp->parent;
2203 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2204 		oldmask = READ_ONCE(rnp_c->qsmask);
2205 	}
2206 
2207 	/*
2208 	 * Get here if we are the last CPU to pass through a quiescent
2209 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2210 	 * to clean up and start the next grace period if one is needed.
2211 	 */
2212 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2213 }
2214 
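/*
 * Concretely, rcu_report_qs_rnp() clears the reported CPUs' bits in the
 * leaf's ->qsmask; once a leaf's mask (and any blocked-reader list)
 * drains, the leaf's ->grpmask bit is cleared in its parent, and so on
 * up the tree.  Only when the root's ->qsmask reaches zero does
 * rcu_report_qs_rsp() signal the grace-period kthread to end the
 * grace period.
 */
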
2215 /*
2216  * Record a quiescent state for all tasks that were previously queued
2217  * on the specified rcu_node structure and that were blocking the current
2218  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2219  * irqs disabled, and this lock is released upon return, but irqs remain
2220  * disabled.
2221  */
2222 static void __maybe_unused
2223 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2224 	__releases(rnp->lock)
2225 {
2226 	unsigned long gps;
2227 	unsigned long mask;
2228 	struct rcu_node *rnp_p;
2229 
2230 	raw_lockdep_assert_held_rcu_node(rnp);
2231 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2232 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2233 	    rnp->qsmask != 0) {
2234 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2235 		return;  /* Still need more quiescent states! */
2236 	}
2237 
2238 	rnp->completedqs = rnp->gp_seq;
2239 	rnp_p = rnp->parent;
2240 	if (rnp_p == NULL) {
2241 		/*
2242 		 * Only one rcu_node structure in the tree, so don't
2243 		 * try to report up to its nonexistent parent!
2244 		 */
2245 		rcu_report_qs_rsp(flags);
2246 		return;
2247 	}
2248 
2249 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2250 	gps = rnp->gp_seq;
2251 	mask = rnp->grpmask;
2252 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2253 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2254 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2255 }
2256 
2257 /*
2258  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2259  * structure.  This must be called from the specified CPU.
2260  */
2261 static void
2262 rcu_report_qs_rdp(struct rcu_data *rdp)
2263 {
2264 	unsigned long flags;
2265 	unsigned long mask;
2266 	bool needwake = false;
2267 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2268 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2269 	struct rcu_node *rnp;
2270 
2271 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2272 	rnp = rdp->mynode;
2273 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2274 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2275 	    rdp->gpwrap) {
2276 
2277 		/*
2278 		 * The grace period in which this quiescent state was
2279 		 * recorded has ended, so don't report it upwards.
2280 		 * We will instead need a new quiescent state that lies
2281 		 * within the current grace period.
2282 		 */
2283 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2284 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2285 		return;
2286 	}
2287 	mask = rdp->grpmask;
2288 	rdp->core_needs_qs = false;
2289 	if ((rnp->qsmask & mask) == 0) {
2290 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2291 	} else {
2292 		/*
2293 		 * This GP can't end until cpu checks in, so all of our
2294 		 * callbacks can be processed during the next GP.
2295 		 */
2296 		if (!offloaded)
2297 			needwake = rcu_accelerate_cbs(rnp, rdp);
2298 
2299 		rcu_disable_urgency_upon_qs(rdp);
2300 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2301 		/* ^^^ Released rnp->lock */
2302 		if (needwake)
2303 			rcu_gp_kthread_wake();
2304 	}
2305 }
2306 
2307 /*
2308  * Check to see if there is a new grace period of which this CPU
2309  * is not yet aware, and if so, set up local rcu_data state for it.
2310  * Otherwise, see if this CPU has just passed through its first
2311  * quiescent state for this grace period, and record that fact if so.
2312  */
2313 static void
2314 rcu_check_quiescent_state(struct rcu_data *rdp)
2315 {
2316 	/* Check for grace-period ends and beginnings. */
2317 	note_gp_changes(rdp);
2318 
2319 	/*
2320 	 * Does this CPU still need to do its part for current grace period?
2321 	 * If no, return and let the other CPUs do their part as well.
2322 	 */
2323 	if (!rdp->core_needs_qs)
2324 		return;
2325 
2326 	/*
2327 	 * Was there a quiescent state since the beginning of the grace
2328 	 * period? If no, then exit and wait for the next call.
2329 	 */
2330 	if (rdp->cpu_no_qs.b.norm)
2331 		return;
2332 
2333 	/*
2334 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2335 	 * judge of that).
2336 	 */
2337 	rcu_report_qs_rdp(rdp);
2338 }
2339 
2340 /*
2341  * Near the end of the offline process.  Trace the fact that this CPU
2342  * is going offline.
2343  */
2344 int rcutree_dying_cpu(unsigned int cpu)
2345 {
2346 	bool blkd;
2347 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2348 	struct rcu_node *rnp = rdp->mynode;
2349 
2350 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2351 		return 0;
2352 
2353 	blkd = !!(rnp->qsmask & rdp->grpmask);
2354 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2355 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2356 	return 0;
2357 }
2358 
2359 /*
2360  * All CPUs for the specified rcu_node structure have gone offline,
2361  * and all tasks that were preempted within an RCU read-side critical
2362  * section while running on one of those CPUs have since exited their RCU
2363  * read-side critical section.  Some other CPU is reporting this fact with
2364  * the specified rcu_node structure's ->lock held and interrupts disabled.
2365  * This function therefore goes up the tree of rcu_node structures,
2366  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2367  * the leaf rcu_node structure's ->qsmaskinit field has already been
2368  * updated.
2369  *
2370  * This function does check that the specified rcu_node structure has
2371  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2372  * prematurely.  That said, invoking it after the fact will cost you
2373  * a needless lock acquisition.  So once it has done its work, don't
2374  * invoke it again.
2375  */
2376 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2377 {
2378 	long mask;
2379 	struct rcu_node *rnp = rnp_leaf;
2380 
2381 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2382 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2383 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2384 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2385 		return;
2386 	for (;;) {
2387 		mask = rnp->grpmask;
2388 		rnp = rnp->parent;
2389 		if (!rnp)
2390 			break;
2391 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2392 		rnp->qsmaskinit &= ~mask;
2393 		/* Between grace periods, so better already be zero! */
2394 		WARN_ON_ONCE(rnp->qsmask);
2395 		if (rnp->qsmaskinit) {
2396 			raw_spin_unlock_rcu_node(rnp);
2397 			/* irqs remain disabled. */
2398 			return;
2399 		}
2400 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2401 	}
2402 }
2403 
2404 /*
2405  * The CPU has been completely removed, and some other CPU is reporting
2406  * this fact from process context.  Do the remainder of the cleanup.
2407  * There can only be one CPU hotplug operation at a time, so no need for
2408  * explicit locking.
2409  */
2410 int rcutree_dead_cpu(unsigned int cpu)
2411 {
2412 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2413 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2414 
2415 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2416 		return 0;
2417 
2418 	/* Adjust any no-longer-needed kthreads. */
2419 	rcu_boost_kthread_setaffinity(rnp, -1);
2420 	/* Do any needed no-CB deferred wakeups from this CPU. */
2421 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2422 
2423 	// Stop-machine done, so allow nohz_full to disable tick.
2424 	tick_dep_clear(TICK_DEP_BIT_RCU);
2425 	return 0;
2426 }
2427 
2428 /*
2429  * Invoke any RCU callbacks that have made it to the end of their grace
2430  * period.  Throttle as specified by rdp->blimit.
2431  */
2432 static void rcu_do_batch(struct rcu_data *rdp)
2433 {
2434 	int div;
2435 	unsigned long flags;
2436 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2437 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2438 	struct rcu_head *rhp;
2439 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2440 	long bl, count;
2441 	long pending, tlimit = 0;
2442 
2443 	/* If no callbacks are ready, just return. */
2444 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2445 		trace_rcu_batch_start(rcu_state.name,
2446 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2447 		trace_rcu_batch_end(rcu_state.name, 0,
2448 				    !rcu_segcblist_empty(&rdp->cblist),
2449 				    need_resched(), is_idle_task(current),
2450 				    rcu_is_callbacks_kthread());
2451 		return;
2452 	}
2453 
2454 	/*
2455 	 * Extract the list of ready callbacks, disabling interrupts to prevent
2456 	 * races with call_rcu() from interrupt handlers.  Leave the
2457 	 * callback counts, as rcu_barrier() needs to be conservative.
2458 	 */
2459 	local_irq_save(flags);
2460 	rcu_nocb_lock(rdp);
2461 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2462 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2463 	div = READ_ONCE(rcu_divisor);
2464 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2465 	bl = max(rdp->blimit, pending >> div);
2466 	if (in_serving_softirq() && unlikely(bl > 100)) {
2467 		long rrn = READ_ONCE(rcu_resched_ns);
2468 
2469 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2470 		tlimit = local_clock() + rrn;
2471 	}
2472 	trace_rcu_batch_start(rcu_state.name,
2473 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2474 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2475 	if (offloaded)
2476 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2477 	rcu_nocb_unlock_irqrestore(rdp, flags);
2478 
2479 	/* Invoke callbacks. */
2480 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2481 	rhp = rcu_cblist_dequeue(&rcl);
2482 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2483 		rcu_callback_t f;
2484 
2485 		debug_rcu_head_unqueue(rhp);
2486 
2487 		rcu_lock_acquire(&rcu_callback_map);
2488 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2489 
2490 		f = rhp->func;
2491 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2492 		f(rhp);
2493 
2494 		rcu_lock_release(&rcu_callback_map);
2495 
2496 		/*
2497 		 * Stop only if limit reached and CPU has something to do.
2498 		 * Note: The rcl structure counts down from zero.
2499 		 */
2500 		if (in_serving_softirq()) {
2501 			if (-rcl.len >= bl && (need_resched() ||
2502 					(!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2503 				break;
2504 
2505 			/*
2506 			 * Make sure we don't spend too much time here and deprive other
2507 			 * softirq vectors of CPU cycles.
2508 			 */
2509 			if (unlikely(tlimit)) {
2510 				/* only call local_clock() every 32 callbacks */
2511 				if (likely((-rcl.len & 31) || local_clock() < tlimit))
2512 					continue;
2513 				/* Exceeded the time limit, so leave. */
2514 				break;
2515 			}
2516 		} else {
2517 			local_bh_enable();
2518 			lockdep_assert_irqs_enabled();
2519 			cond_resched_tasks_rcu_qs();
2520 			lockdep_assert_irqs_enabled();
2521 			local_bh_disable();
2522 		}
2523 	}
2524 
2525 	local_irq_save(flags);
2526 	rcu_nocb_lock(rdp);
2527 	count = -rcl.len;
2528 	rdp->n_cbs_invoked += count;
2529 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2530 			    is_idle_task(current), rcu_is_callbacks_kthread());
2531 
2532 	/* Update counts and requeue any remaining callbacks. */
2533 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2534 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2535 	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2536 
2537 	/* Reinstate batch limit if we have worked down the excess. */
2538 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2539 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2540 		rdp->blimit = blimit;
2541 
2542 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2543 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2544 		rdp->qlen_last_fqs_check = 0;
2545 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2546 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2547 		rdp->qlen_last_fqs_check = count;
2548 
2549 	/*
2550 	 * The following usually indicates a double call_rcu().  To track
2551 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2552 	 */
2553 	WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2554 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2555 		     count != 0 && rcu_segcblist_empty(&rdp->cblist));
2556 
2557 	rcu_nocb_unlock_irqrestore(rdp, flags);
2558 
2559 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2560 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2561 		invoke_rcu_core();
2562 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2563 }
2564 
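/*
 * A worked example of the batch limit above, assuming the default
 * blimit of 10 and rcu_divisor of 7: with 2560 callbacks pending,
 * bl = max(10, 2560 >> 7) = 20, so softirq-context invocation starts
 * checking for other work after 20 callbacks, subject also to the
 * time limit derived from rcu_resched_ns.
 */
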
2565 /*
2566  * This function is invoked from each scheduling-clock interrupt,
2567  * and checks to see if this CPU is in a non-context-switch quiescent
2568  * state, for example, user mode or idle loop.  It also schedules RCU
2569  * core processing.  If the current grace period has gone on too long,
2570  * it will ask the scheduler to manufacture a context switch for the sole
2571  * purpose of providing the needed quiescent state.
2572  */
2573 void rcu_sched_clock_irq(int user)
2574 {
2575 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2576 	lockdep_assert_irqs_disabled();
2577 	raw_cpu_inc(rcu_data.ticks_this_gp);
2578 	/* The load-acquire pairs with the store-release setting to true. */
2579 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2580 		/* Idle and userspace execution already are quiescent states. */
2581 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2582 			set_tsk_need_resched(current);
2583 			set_preempt_need_resched();
2584 		}
2585 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2586 	}
2587 	rcu_flavor_sched_clock_irq(user);
2588 	if (rcu_pending(user))
2589 		invoke_rcu_core();
2590 	lockdep_assert_irqs_disabled();
2591 
2592 	trace_rcu_utilization(TPS("End scheduler-tick"));
2593 }
2594 
2595 /*
2596  * Scan the leaf rcu_node structures.  For each structure on which all
2597  * CPUs have reported a quiescent state and on which there are tasks
2598  * blocking the current grace period, initiate RCU priority boosting.
2599  * Otherwise, invoke the specified function to check dyntick state for
2600  * each CPU that has not yet reported a quiescent state.
2601  */
2602 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2603 {
2604 	int cpu;
2605 	unsigned long flags;
2606 	unsigned long mask;
2607 	struct rcu_data *rdp;
2608 	struct rcu_node *rnp;
2609 
2610 	rcu_state.cbovld = rcu_state.cbovldnext;
2611 	rcu_state.cbovldnext = false;
2612 	rcu_for_each_leaf_node(rnp) {
2613 		cond_resched_tasks_rcu_qs();
2614 		mask = 0;
2615 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2616 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2617 		if (rnp->qsmask == 0) {
2618 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2619 				/*
2620 				 * No point in scanning bits because they
2621 				 * are all zero.  But we might need to
2622 				 * priority-boost blocked readers.
2623 				 */
2624 				rcu_initiate_boost(rnp, flags);
2625 				/* rcu_initiate_boost() releases rnp->lock */
2626 				continue;
2627 			}
2628 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2629 			continue;
2630 		}
2631 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2632 			rdp = per_cpu_ptr(&rcu_data, cpu);
2633 			if (f(rdp)) {
2634 				mask |= rdp->grpmask;
2635 				rcu_disable_urgency_upon_qs(rdp);
2636 			}
2637 		}
2638 		if (mask != 0) {
2639 			/* Idle/offline CPUs, report (releases rnp->lock). */
2640 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2641 		} else {
2642 			/* Nothing to do here, so just drop the lock. */
2643 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2644 		}
2645 	}
2646 }
2647 
2648 /*
2649  * Force quiescent states on reluctant CPUs, and also detect which
2650  * CPUs are in dyntick-idle mode.
2651  */
2652 void rcu_force_quiescent_state(void)
2653 {
2654 	unsigned long flags;
2655 	bool ret;
2656 	struct rcu_node *rnp;
2657 	struct rcu_node *rnp_old = NULL;
2658 
2659 	/* Funnel through hierarchy to reduce memory contention. */
2660 	rnp = raw_cpu_read(rcu_data.mynode);
2661 	for (; rnp != NULL; rnp = rnp->parent) {
2662 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2663 		       !raw_spin_trylock(&rnp->fqslock);
2664 		if (rnp_old != NULL)
2665 			raw_spin_unlock(&rnp_old->fqslock);
2666 		if (ret)
2667 			return;
2668 		rnp_old = rnp;
2669 	}
2670 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2671 
2672 	/* Reached the root of the rcu_node tree, acquire lock. */
2673 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2674 	raw_spin_unlock(&rnp_old->fqslock);
2675 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2676 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2677 		return;  /* Someone beat us to it. */
2678 	}
2679 	WRITE_ONCE(rcu_state.gp_flags,
2680 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2681 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2682 	rcu_gp_kthread_wake();
2683 }
2684 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2685 
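/*
 * The ->fqslock funnel above keeps concurrent callers of
 * rcu_force_quiescent_state() from piling up on the root rcu_node lock:
 * each caller trylocks its way up from its own leaf, and any caller that
 * loses a trylock (or sees RCU_GP_FLAG_FQS already set) returns early,
 * leaving a single winner to set the flag and wake the grace-period
 * kthread.
 */
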
2686 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2687 // grace periods.
2688 static void strict_work_handler(struct work_struct *work)
2689 {
2690 	rcu_read_lock();
2691 	rcu_read_unlock();
2692 }
2693 
2694 /* Perform RCU core processing work for the current CPU.  */
2695 static __latent_entropy void rcu_core(void)
2696 {
2697 	unsigned long flags;
2698 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2699 	struct rcu_node *rnp = rdp->mynode;
2700 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2701 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2702 
2703 	if (cpu_is_offline(smp_processor_id()))
2704 		return;
2705 	trace_rcu_utilization(TPS("Start RCU core"));
2706 	WARN_ON_ONCE(!rdp->beenonline);
2707 
2708 	/* Report any deferred quiescent states if preemption enabled. */
2709 	if (!(preempt_count() & PREEMPT_MASK)) {
2710 		rcu_preempt_deferred_qs(current);
2711 	} else if (rcu_preempt_need_deferred_qs(current)) {
2712 		set_tsk_need_resched(current);
2713 		set_preempt_need_resched();
2714 	}
2715 
2716 	/* Update RCU state based on any recent quiescent states. */
2717 	rcu_check_quiescent_state(rdp);
2718 
2719 	/* No grace period and unregistered callbacks? */
2720 	if (!rcu_gp_in_progress() &&
2721 	    rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2722 		local_irq_save(flags);
2723 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2724 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2725 		local_irq_restore(flags);
2726 	}
2727 
2728 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2729 
2730 	/* If there are callbacks ready, invoke them. */
2731 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2732 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2733 		rcu_do_batch(rdp);
2734 
2735 	/* Do any needed deferred wakeups of rcuo kthreads. */
2736 	do_nocb_deferred_wakeup(rdp);
2737 	trace_rcu_utilization(TPS("End RCU core"));
2738 
2739 	// If strict GPs, schedule an RCU reader in a clean environment.
2740 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2741 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2742 }
2743 
2744 static void rcu_core_si(struct softirq_action *h)
2745 {
2746 	rcu_core();
2747 }
2748 
2749 static void rcu_wake_cond(struct task_struct *t, int status)
2750 {
2751 	/*
2752 	 * If the thread is yielding, only wake it when this
2753 	 * is invoked from idle
2754 	 */
2755 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2756 		wake_up_process(t);
2757 }
2758 
2759 static void invoke_rcu_core_kthread(void)
2760 {
2761 	struct task_struct *t;
2762 	unsigned long flags;
2763 
2764 	local_irq_save(flags);
2765 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2766 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2767 	if (t != NULL && t != current)
2768 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2769 	local_irq_restore(flags);
2770 }
2771 
2772 /*
2773  * Wake up this CPU's rcuc kthread to do RCU core processing.
2774  */
2775 static void invoke_rcu_core(void)
2776 {
2777 	if (!cpu_online(smp_processor_id()))
2778 		return;
2779 	if (use_softirq)
2780 		raise_softirq(RCU_SOFTIRQ);
2781 	else
2782 		invoke_rcu_core_kthread();
2783 }
2784 
2785 static void rcu_cpu_kthread_park(unsigned int cpu)
2786 {
2787 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2788 }
2789 
2790 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2791 {
2792 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2793 }
2794 
2795 /*
2796  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2797  * the RCU softirq used in configurations of RCU that do not support RCU
2798  * priority boosting.
2799  */
2800 static void rcu_cpu_kthread(unsigned int cpu)
2801 {
2802 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2803 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2804 	int spincnt;
2805 
2806 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2807 	for (spincnt = 0; spincnt < 10; spincnt++) {
2808 		local_bh_disable();
2809 		*statusp = RCU_KTHREAD_RUNNING;
2810 		local_irq_disable();
2811 		work = *workp;
2812 		*workp = 0;
2813 		local_irq_enable();
2814 		if (work)
2815 			rcu_core();
2816 		local_bh_enable();
2817 		if (*workp == 0) {
2818 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2819 			*statusp = RCU_KTHREAD_WAITING;
2820 			return;
2821 		}
2822 	}
2823 	*statusp = RCU_KTHREAD_YIELDING;
2824 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2825 	schedule_timeout_idle(2);
2826 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2827 	*statusp = RCU_KTHREAD_WAITING;
2828 }
2829 
2830 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2831 	.store			= &rcu_data.rcu_cpu_kthread_task,
2832 	.thread_should_run	= rcu_cpu_kthread_should_run,
2833 	.thread_fn		= rcu_cpu_kthread,
2834 	.thread_comm		= "rcuc/%u",
2835 	.setup			= rcu_cpu_kthread_setup,
2836 	.park			= rcu_cpu_kthread_park,
2837 };
2838 
2839 /*
2840  * Spawn per-CPU RCU core processing kthreads.
2841  */
2842 static int __init rcu_spawn_core_kthreads(void)
2843 {
2844 	int cpu;
2845 
2846 	for_each_possible_cpu(cpu)
2847 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2848 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2849 		return 0;
2850 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2851 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2852 	return 0;
2853 }
2854 
2855 /*
2856  * Handle any core-RCU processing required by a call_rcu() invocation.
2857  */
2858 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2859 			    unsigned long flags)
2860 {
2861 	/*
2862 	 * If called from an extended quiescent state, invoke the RCU
2863 	 * core in order to force a re-evaluation of RCU's idleness.
2864 	 */
2865 	if (!rcu_is_watching())
2866 		invoke_rcu_core();
2867 
2868 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2869 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2870 		return;
2871 
2872 	/*
2873 	 * Force the grace period if too many callbacks or too long waiting.
2874 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2875 	 * if some other CPU has recently done so.  Also, don't bother
2876 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2877 	 * is the only one waiting for a grace period to complete.
2878 	 */
2879 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2880 		     rdp->qlen_last_fqs_check + qhimark)) {
2881 
2882 		/* Are we ignoring a completed grace period? */
2883 		note_gp_changes(rdp);
2884 
2885 		/* Start a new grace period if one not already started. */
2886 		if (!rcu_gp_in_progress()) {
2887 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2888 		} else {
2889 			/* Give the grace period a kick. */
2890 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2891 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2892 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2893 				rcu_force_quiescent_state();
2894 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2895 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2896 		}
2897 	}
2898 }
2899 
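/*
 * Hysteresis in __call_rcu_core(): the forcing path is considered only
 * once the callback count exceeds ->qlen_last_fqs_check by qhimark
 * (10000 by default), after which ->qlen_last_fqs_check is reset to the
 * current count so that another qhimark callbacks must accumulate
 * before the next attempt.
 */
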
2900 /*
2901  * RCU callback function to leak a callback.
2902  */
2903 static void rcu_leak_callback(struct rcu_head *rhp)
2904 {
2905 }
2906 
2907 /*
2908  * Check and if necessary update the leaf rcu_node structure's
2909  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2910  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2911  * structure's ->lock.
2912  */
2913 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2914 {
2915 	raw_lockdep_assert_held_rcu_node(rnp);
2916 	if (qovld_calc <= 0)
2917 		return; // Early boot and wildcard value set.
2918 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2919 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2920 	else
2921 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2922 }
2923 
2924 /*
2925  * Check and if necessary update the leaf rcu_node structure's
2926  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2927  * number of queued RCU callbacks.  No locks need be held, but the
2928  * caller must have disabled interrupts.
2929  *
2930  * Note that this function ignores the possibility that there are a lot
2931  * of callbacks all of which have already seen the end of their respective
2932  * grace periods.  This omission is due to the need for no-CBs CPUs to
2933  * be holding ->nocb_lock to do this check, which is too heavy for a
2934  * common-case operation.
2935  */
2936 static void check_cb_ovld(struct rcu_data *rdp)
2937 {
2938 	struct rcu_node *const rnp = rdp->mynode;
2939 
2940 	if (qovld_calc <= 0 ||
2941 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2942 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2943 		return; // Early boot wildcard value or already set correctly.
2944 	raw_spin_lock_rcu_node(rnp);
2945 	check_cb_ovld_locked(rdp, rnp);
2946 	raw_spin_unlock_rcu_node(rnp);
2947 }
2948 
2949 /* Helper function for call_rcu() and friends.  */
2950 static void
2951 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2952 {
2953 	unsigned long flags;
2954 	struct rcu_data *rdp;
2955 	bool was_alldone;
2956 
2957 	/* Misaligned rcu_head! */
2958 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2959 
2960 	if (debug_rcu_head_queue(head)) {
2961 		/*
2962 		 * Probable double call_rcu(), so leak the callback.
2963 		 * Use rcu:rcu_callback trace event to find the previous
2964 		 * time callback was passed to __call_rcu().
2965 		 */
2966 		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2967 			  head, head->func);
2968 		WRITE_ONCE(head->func, rcu_leak_callback);
2969 		return;
2970 	}
2971 	head->func = func;
2972 	head->next = NULL;
2973 	local_irq_save(flags);
2974 	kasan_record_aux_stack(head);
2975 	rdp = this_cpu_ptr(&rcu_data);
2976 
2977 	/* Add the callback to our list. */
2978 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2979 		// This can trigger due to call_rcu() from offline CPU:
2980 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2981 		WARN_ON_ONCE(!rcu_is_watching());
2982 		// Very early boot, before rcu_init().  Initialize if needed
2983 		// and then drop through to queue the callback.
2984 		if (rcu_segcblist_empty(&rdp->cblist))
2985 			rcu_segcblist_init(&rdp->cblist);
2986 	}
2987 
2988 	check_cb_ovld(rdp);
2989 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2990 		return; // Enqueued onto ->nocb_bypass, so just leave.
2991 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2992 	rcu_segcblist_enqueue(&rdp->cblist, head);
2993 	if (__is_kvfree_rcu_offset((unsigned long)func))
2994 		trace_rcu_kvfree_callback(rcu_state.name, head,
2995 					 (unsigned long)func,
2996 					 rcu_segcblist_n_cbs(&rdp->cblist));
2997 	else
2998 		trace_rcu_callback(rcu_state.name, head,
2999 				   rcu_segcblist_n_cbs(&rdp->cblist));
3000 
3001 	/* Go handle any RCU core processing required. */
3002 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
3003 	    unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
3004 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3005 	} else {
3006 		__call_rcu_core(rdp, head, flags);
3007 		local_irq_restore(flags);
3008 	}
3009 }
3010 
3011 /**
3012  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3013  * @head: structure to be used for queueing the RCU updates.
3014  * @func: actual callback function to be invoked after the grace period
3015  *
3016  * The callback function will be invoked some time after a full grace
3017  * period elapses, in other words after all pre-existing RCU read-side
3018  * critical sections have completed.  However, the callback function
3019  * might well execute concurrently with RCU read-side critical sections
3020  * that started after call_rcu() was invoked.  RCU read-side critical
3021  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3022  * may be nested.  In addition, regions of code across which interrupts,
3023  * preemption, or softirqs have been disabled also serve as RCU read-side
3024  * critical sections.  This includes hardware interrupt handlers, softirq
3025  * handlers, and NMI handlers.
3026  *
3027  * Note that all CPUs must agree that the grace period extended beyond
3028  * all pre-existing RCU read-side critical sections.  On systems with more
3029  * than one CPU, this means that when "func()" is invoked, each CPU is
3030  * guaranteed to have executed a full memory barrier since the end of its
3031  * last RCU read-side critical section whose beginning preceded the call
3032  * to call_rcu().  It also means that each CPU executing an RCU read-side
3033  * critical section that continues beyond the start of "func()" must have
3034  * executed a memory barrier after the call_rcu() but before the beginning
3035  * of that RCU read-side critical section.  Note that these guarantees
3036  * include CPUs that are offline, idle, or executing in user mode, as
3037  * well as CPUs that are executing in the kernel.
3038  *
3039  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3040  * resulting RCU callback function "func()", then both CPU A and CPU B are
3041  * guaranteed to execute a full memory barrier during the time interval
3042  * between the call to call_rcu() and the invocation of "func()" -- even
3043  * if CPU A and CPU B are the same CPU (but again only if the system has
3044  * more than one CPU).
3045  */
3046 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3047 {
3048 	__call_rcu(head, func);
3049 }
3050 EXPORT_SYMBOL_GPL(call_rcu);
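/*
 * A minimal usage sketch (not part of this file): "struct foo", its list,
 * and the foo_*() helpers below are hypothetical names, shown only to make
 * the call_rcu() update-side pattern concrete: unlink the element, then
 * defer its kfree() until after a grace period.
 *
 *	struct foo {
 *		struct list_head list;
 *		int key;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_del(struct foo *fp)	// Caller holds the update-side lock.
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */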
3051 
3052 
3053 /* Maximum number of jiffies to wait before draining a batch. */
3054 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3055 #define KFREE_N_BATCHES 2
3056 #define FREE_N_CHANNELS 2
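/*
 * Illustrative arithmetic only: HZ/50 evaluates to 20 jiffies at HZ=1000
 * and to 5 jiffies at HZ=250, so the drain interval is roughly a
 * 20-millisecond batching window regardless of the configured tick rate.
 */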
3057 
3058 /**
3059  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3060  * @nr_records: Number of active pointers in the array
3061  * @next: Next bulk object in the block chain
3062  * @records: Array of the kvfree_rcu() pointers
3063  */
3064 struct kvfree_rcu_bulk_data {
3065 	unsigned long nr_records;
3066 	struct kvfree_rcu_bulk_data *next;
3067 	void *records[];
3068 };
3069 
3070 /*
3071  * This macro defines how many entries the "records" array
3072  * will contain.  It is chosen so that the size of the
3073  * kvfree_rcu_bulk_data structure becomes exactly one page.
3074  */
3075 #define KVFREE_BULK_MAX_ENTR \
3076 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
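/*
 * Worked example (assuming a 64-bit kernel with 4 KiB pages): the header
 * above is 16 bytes (one unsigned long plus one pointer), so each
 * page-sized block holds (4096 - 16) / 8 = 510 pointers.
 */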
3077 
3078 /**
3079  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3080  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3081  * @head_free: List of kfree_rcu() objects waiting for a grace period
3082  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3083  * @krcp: Pointer to @kfree_rcu_cpu structure
3084  */
3085 
3086 struct kfree_rcu_cpu_work {
3087 	struct rcu_work rcu_work;
3088 	struct rcu_head *head_free;
3089 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3090 	struct kfree_rcu_cpu *krcp;
3091 };
3092 
3093 /**
3094  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3095  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3096  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3097  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3098  * @lock: Synchronize access to this structure
3099  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3100  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3101  * @initialized: The @rcu_work fields have been initialized
3102  * @count: Number of objects for which GP not started
3103  * @bkvcache:
3104  *	A simple cache list that contains objects for reuse.  In order
3105  *	to save some per-CPU space the list is singly linked.  Even
3106  *	though it is lockless, access to it must be protected by the
3107  *	per-cpu lock.
3108  * @page_cache_work: A work to refill the cache when it is empty
3109  * @work_in_progress: Indicates that page_cache_work is running
3110  * @hrtimer: A hrtimer for scheduling a page_cache_work
3111  * @nr_bkv_objs: number of allocated objects at @bkvcache.
3112  *
3113  * This is a per-CPU structure.  The reason that it is not included in
3114  * the rcu_data structure is to permit this code to be extracted from
3115  * the RCU files.  Such extraction could allow further optimization of
3116  * the interactions with the slab allocators.
3117  */
3118 struct kfree_rcu_cpu {
3119 	struct rcu_head *head;
3120 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3121 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3122 	raw_spinlock_t lock;
3123 	struct delayed_work monitor_work;
3124 	bool monitor_todo;
3125 	bool initialized;
3126 	int count;
3127 
3128 	struct work_struct page_cache_work;
3129 	atomic_t work_in_progress;
3130 	struct hrtimer hrtimer;
3131 
3132 	struct llist_head bkvcache;
3133 	int nr_bkv_objs;
3134 };
3135 
3136 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3137 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3138 };
3139 
3140 static __always_inline void
3141 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3142 {
3143 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3144 	int i;
3145 
3146 	for (i = 0; i < bhead->nr_records; i++)
3147 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3148 #endif
3149 }
3150 
3151 static inline struct kfree_rcu_cpu *
3152 krc_this_cpu_lock(unsigned long *flags)
3153 {
3154 	struct kfree_rcu_cpu *krcp;
3155 
3156 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3157 	krcp = this_cpu_ptr(&krc);
3158 	raw_spin_lock(&krcp->lock);
3159 
3160 	return krcp;
3161 }
3162 
3163 static inline void
3164 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3165 {
3166 	raw_spin_unlock(&krcp->lock);
3167 	local_irq_restore(flags);
3168 }
3169 
3170 static inline struct kvfree_rcu_bulk_data *
3171 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3172 {
3173 	if (!krcp->nr_bkv_objs)
3174 		return NULL;
3175 
3176 	krcp->nr_bkv_objs--;
3177 	return (struct kvfree_rcu_bulk_data *)
3178 		llist_del_first(&krcp->bkvcache);
3179 }
3180 
3181 static inline bool
3182 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3183 	struct kvfree_rcu_bulk_data *bnode)
3184 {
3185 	// Check the limit.
3186 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3187 		return false;
3188 
3189 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3190 	krcp->nr_bkv_objs++;
3191 	return true;
3192 
3193 }
3194 
3195 /*
3196  * This function is invoked in workqueue context after a grace period.
3197  * It frees all the objects queued on ->bhead_free or ->head_free.
3198  */
3199 static void kfree_rcu_work(struct work_struct *work)
3200 {
3201 	unsigned long flags;
3202 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3203 	struct rcu_head *head, *next;
3204 	struct kfree_rcu_cpu *krcp;
3205 	struct kfree_rcu_cpu_work *krwp;
3206 	int i, j;
3207 
3208 	krwp = container_of(to_rcu_work(work),
3209 			    struct kfree_rcu_cpu_work, rcu_work);
3210 	krcp = krwp->krcp;
3211 
3212 	raw_spin_lock_irqsave(&krcp->lock, flags);
3213 	// Channels 1 and 2.
3214 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3215 		bkvhead[i] = krwp->bkvhead_free[i];
3216 		krwp->bkvhead_free[i] = NULL;
3217 	}
3218 
3219 	// Channel 3.
3220 	head = krwp->head_free;
3221 	krwp->head_free = NULL;
3222 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3223 
3224 	// Handle the first two channels.
3225 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3226 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3227 			bnext = bkvhead[i]->next;
3228 			debug_rcu_bhead_unqueue(bkvhead[i]);
3229 
3230 			rcu_lock_acquire(&rcu_callback_map);
3231 			if (i == 0) { // kmalloc() / kfree().
3232 				trace_rcu_invoke_kfree_bulk_callback(
3233 					rcu_state.name, bkvhead[i]->nr_records,
3234 					bkvhead[i]->records);
3235 
3236 				kfree_bulk(bkvhead[i]->nr_records,
3237 					bkvhead[i]->records);
3238 			} else { // vmalloc() / vfree().
3239 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3240 					trace_rcu_invoke_kvfree_callback(
3241 						rcu_state.name,
3242 						bkvhead[i]->records[j], 0);
3243 
3244 					vfree(bkvhead[i]->records[j]);
3245 				}
3246 			}
3247 			rcu_lock_release(&rcu_callback_map);
3248 
3249 			raw_spin_lock_irqsave(&krcp->lock, flags);
3250 			if (put_cached_bnode(krcp, bkvhead[i]))
3251 				bkvhead[i] = NULL;
3252 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3253 
3254 			if (bkvhead[i])
3255 				free_page((unsigned long) bkvhead[i]);
3256 
3257 			cond_resched_tasks_rcu_qs();
3258 		}
3259 	}
3260 
3261 	/*
3262 	 * Emergency case only.  It can happen under low-memory
3263 	 * conditions, when an allocation has failed and the "bulk"
3264 	 * path therefore cannot be maintained for the time being.
3265 	 */
3266 	for (; head; head = next) {
3267 		unsigned long offset = (unsigned long)head->func;
3268 		void *ptr = (void *)head - offset;
3269 
3270 		next = head->next;
3271 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3272 		rcu_lock_acquire(&rcu_callback_map);
3273 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3274 
3275 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3276 			kvfree(ptr);
3277 
3278 		rcu_lock_release(&rcu_callback_map);
3279 		cond_resched_tasks_rcu_qs();
3280 	}
3281 }
3282 
3283 /*
3284  * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3285  *
3286  * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3287  * timeout has been reached.
3288  */
3289 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3290 {
3291 	struct kfree_rcu_cpu_work *krwp;
3292 	bool repeat = false;
3293 	int i, j;
3294 
3295 	lockdep_assert_held(&krcp->lock);
3296 
3297 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3298 		krwp = &(krcp->krw_arr[i]);
3299 
3300 		/*
3301 		 * Try to detach bkvhead or head and attach it over any
3302 		 * available corresponding free channel.  It can be that
3303 		 * a previous RCU batch is still in progress, in which
3304 		 * case queueing another one immediately is not possible,
3305 		 * so return false to tell the caller to retry.
3306 		 */
3307 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3308 			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3309 				(krcp->head && !krwp->head_free)) {
3310 			// Channel 1 corresponds to SLAB ptrs.
3311 			// Channel 2 corresponds to vmalloc ptrs.
3312 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3313 				if (!krwp->bkvhead_free[j]) {
3314 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3315 					krcp->bkvhead[j] = NULL;
3316 				}
3317 			}
3318 
3319 			// Channel 3 corresponds to emergency path.
3320 			if (!krwp->head_free) {
3321 				krwp->head_free = krcp->head;
3322 				krcp->head = NULL;
3323 			}
3324 
3325 			WRITE_ONCE(krcp->count, 0);
3326 
3327 			/*
3328 			 * There is one work item per batch, so each batch
3329 			 * can handle up to three "free channels".  The work
3330 			 * item can already be in the pending state when the
3331 			 * channels have been detached one after the
3332 			 * other.
3333 			 */
3334 			queue_rcu_work(system_wq, &krwp->rcu_work);
3335 		}
3336 
3337 		// Repeat if any corresponding "free" channel is still busy.
3338 		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3339 			repeat = true;
3340 	}
3341 
3342 	return !repeat;
3343 }
3344 
3345 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3346 					  unsigned long flags)
3347 {
3348 	// Attempt to start a new batch.
3349 	krcp->monitor_todo = false;
3350 	if (queue_kfree_rcu_work(krcp)) {
3351 		// Success! Our job is done here.
3352 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3353 		return;
3354 	}
3355 
3356 	// Previous RCU batch still in progress, try again later.
3357 	krcp->monitor_todo = true;
3358 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3359 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3360 }
3361 
3362 /*
3363  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3364  * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3365  */
3366 static void kfree_rcu_monitor(struct work_struct *work)
3367 {
3368 	unsigned long flags;
3369 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3370 						 monitor_work.work);
3371 
3372 	raw_spin_lock_irqsave(&krcp->lock, flags);
3373 	if (krcp->monitor_todo)
3374 		kfree_rcu_drain_unlock(krcp, flags);
3375 	else
3376 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3377 }
3378 
3379 static enum hrtimer_restart
3380 schedule_page_work_fn(struct hrtimer *t)
3381 {
3382 	struct kfree_rcu_cpu *krcp =
3383 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3384 
3385 	queue_work(system_highpri_wq, &krcp->page_cache_work);
3386 	return HRTIMER_NORESTART;
3387 }
3388 
3389 static void fill_page_cache_func(struct work_struct *work)
3390 {
3391 	struct kvfree_rcu_bulk_data *bnode;
3392 	struct kfree_rcu_cpu *krcp =
3393 		container_of(work, struct kfree_rcu_cpu,
3394 			page_cache_work);
3395 	unsigned long flags;
3396 	bool pushed;
3397 	int i;
3398 
3399 	for (i = 0; i < rcu_min_cached_objs; i++) {
3400 		bnode = (struct kvfree_rcu_bulk_data *)
3401 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3402 
3403 		if (!bnode)
3404 			break;
3405 
3406 		raw_spin_lock_irqsave(&krcp->lock, flags);
3407 		pushed = put_cached_bnode(krcp, bnode);
3408 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3409 
3410 		if (!pushed) {
3411 			free_page((unsigned long) bnode);
3412 			break;
3413 		}
3414 	}
3415 
3416 	atomic_set(&krcp->work_in_progress, 0);
3417 }
3418 
3419 static void
3420 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3421 {
3422 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3423 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3424 		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
3425 			HRTIMER_MODE_REL);
3426 		krcp->hrtimer.function = schedule_page_work_fn;
3427 		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3428 	}
3429 }
3430 
3431 static inline bool
3432 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3433 {
3434 	struct kvfree_rcu_bulk_data *bnode;
3435 	int idx;
3436 
3437 	if (unlikely(!krcp->initialized))
3438 		return false;
3439 
3440 	lockdep_assert_held(&krcp->lock);
3441 	idx = !!is_vmalloc_addr(ptr);
3442 
3443 	/* Check if a new block is required. */
3444 	if (!krcp->bkvhead[idx] ||
3445 			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3446 		bnode = get_cached_bnode(krcp);
3447 		/* Switch to emergency path. */
3448 		if (!bnode)
3449 			return false;
3450 
3451 		/* Initialize the new block. */
3452 		bnode->nr_records = 0;
3453 		bnode->next = krcp->bkvhead[idx];
3454 
3455 		/* Attach it to the head. */
3456 		krcp->bkvhead[idx] = bnode;
3457 	}
3458 
3459 	/* Finally insert. */
3460 	krcp->bkvhead[idx]->records
3461 		[krcp->bkvhead[idx]->nr_records++] = ptr;
3462 
3463 	return true;
3464 }
3465 
3466 /*
3467  * Queue a request for lazy invocation of the appropriate free routine after a
3468  * grace period.  Please note that three paths are maintained: two main ones
3469  * that use the array-of-pointers interface, and a third emergency one that is
3470  * used only when the main path temporarily cannot be maintained due to
3471  * memory pressure.
3472  *
3473  * Each kvfree_call_rcu() request is added to a batch.  The batch is drained
3474  * every KFREE_DRAIN_JIFFIES jiffies, and all the objects in the batch are
3475  * freed in workqueue context.  This batching reduces the number of grace
3476  * periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3477  */
3478 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3479 {
3480 	unsigned long flags;
3481 	struct kfree_rcu_cpu *krcp;
3482 	bool success;
3483 	void *ptr;
3484 
3485 	if (head) {
3486 		ptr = (void *) head - (unsigned long) func;
3487 	} else {
3488 		/*
3489 		 * Please note that the head-less variant has a
3490 		 * limitation, hence the clear rule for such objects:
3491 		 * it may be used from might_sleep() context only.
3492 		 * Everywhere else, please embed an rcu_head in
3493 		 * your data.
3494 		 */
3495 		might_sleep();
3496 		ptr = (unsigned long *) func;
3497 	}
3498 
3499 	krcp = krc_this_cpu_lock(&flags);
3500 
3501 	// Queue the object but don't yet schedule the batch.
3502 	if (debug_rcu_head_queue(ptr)) {
3503 		// Probable double kfree_rcu(), just leak.
3504 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3505 			  __func__, head);
3506 
3507 		// Mark as success and leave.
3508 		success = true;
3509 		goto unlock_return;
3510 	}
3511 
3512 	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3513 	if (!success) {
3514 		run_page_cache_worker(krcp);
3515 
3516 		if (head == NULL)
3517 			// Inline if kvfree_rcu(one_arg) call.
3518 			goto unlock_return;
3519 
3520 		head->func = func;
3521 		head->next = krcp->head;
3522 		krcp->head = head;
3523 		success = true;
3524 	}
3525 
3526 	WRITE_ONCE(krcp->count, krcp->count + 1);
3527 
3528 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3529 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3530 	    !krcp->monitor_todo) {
3531 		krcp->monitor_todo = true;
3532 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3533 	}
3534 
3535 unlock_return:
3536 	krc_this_cpu_unlock(krcp, flags);
3537 
3538 	/*
3539 	 * Inline kvfree() after synchronize_rcu().  We can do
3540 	 * this only from might_sleep() context, so that the
3541 	 * current CPU can pass through a quiescent state.
3542 	 */
3543 	if (!success) {
3544 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3545 		synchronize_rcu();
3546 		kvfree(ptr);
3547 	}
3548 }
3549 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
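/*
 * Illustrative sketch of how this function is typically reached through the
 * kvfree_rcu() wrapper macro; "struct foo" and the fp/ptr variables below
 * are hypothetical, shown only to contrast the two calling forms.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// Double-argument form: the rcu_head is embedded in the object,
 *	// so this may be used from atomic context.
 *	kvfree_rcu(fp, rcu);
 *
 *	// Single-argument ("headless") form: may fall back to
 *	// synchronize_rcu() under memory pressure, so it is legal only
 *	// where might_sleep() is legal.
 *	kvfree_rcu(ptr);
 */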
3550 
3551 static unsigned long
3552 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3553 {
3554 	int cpu;
3555 	unsigned long count = 0;
3556 
3557 	/* Snapshot count of all CPUs */
3558 	for_each_possible_cpu(cpu) {
3559 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3560 
3561 		count += READ_ONCE(krcp->count);
3562 	}
3563 
3564 	return count;
3565 }
3566 
3567 static unsigned long
3568 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3569 {
3570 	int cpu, freed = 0;
3571 	unsigned long flags;
3572 
3573 	for_each_possible_cpu(cpu) {
3574 		int count;
3575 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3576 
3577 		count = krcp->count;
3578 		raw_spin_lock_irqsave(&krcp->lock, flags);
3579 		if (krcp->monitor_todo)
3580 			kfree_rcu_drain_unlock(krcp, flags);
3581 		else
3582 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3583 
3584 		sc->nr_to_scan -= count;
3585 		freed += count;
3586 
3587 		if (sc->nr_to_scan <= 0)
3588 			break;
3589 	}
3590 
3591 	return freed == 0 ? SHRINK_STOP : freed;
3592 }
3593 
3594 static struct shrinker kfree_rcu_shrinker = {
3595 	.count_objects = kfree_rcu_shrink_count,
3596 	.scan_objects = kfree_rcu_shrink_scan,
3597 	.batch = 0,
3598 	.seeks = DEFAULT_SEEKS,
3599 };
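/*
 * Note: the shrinker above only takes effect once registered with the MM
 * layer, which happens during boot outside the range shown here,
 * conceptually along these lines:
 *
 *	if (register_shrinker(&kfree_rcu_shrinker))
 *		pr_err("Failed to register kfree_rcu() shrinker!\n");
 */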
3600 
3601 void __init kfree_rcu_scheduler_running(void)
3602 {
3603 	int cpu;
3604 	unsigned long flags;
3605 
3606 	for_each_possible_cpu(cpu) {
3607 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3608 
3609 		raw_spin_lock_irqsave(&krcp->lock, flags);
3610 		if (!krcp->head || krcp->monitor_todo) {
3611 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3612 			continue;
3613 		}
3614 		krcp->monitor_todo = true;
3615 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3616 					 KFREE_DRAIN_JIFFIES);
3617 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3618 	}
3619 }
3620 
3621 /*
3622  * During early boot, any blocking grace-period wait automatically
3623  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3624  *
3625  * However, because a context switch is a grace period for !PREEMPTION, any
3626  * blocking grace-period wait automatically implies a grace period if
3627  * there is only one CPU online at any point in time during execution of
3628  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3629  * occasionally incorrectly indicate that there are multiple CPUs online
3630  * when there was in fact only one the whole time, as this just adds some
3631  * overhead: RCU still operates correctly.
3632  */
3633 static int rcu_blocking_is_gp(void)
3634 {
3635 	int ret;
3636 
3637 	if (IS_ENABLED(CONFIG_PREEMPTION))
3638 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3639 	might_sleep();  /* Check for RCU read-side critical section. */
3640 	preempt_disable();
3641 	ret = num_online_cpus() <= 1;
3642 	preempt_enable();
3643 	return ret;
3644 }
3645 
3646 /**
3647  * synchronize_rcu - wait until a grace period has elapsed.
3648  *
3649  * Control will return to the caller some time after a full grace
3650  * period has elapsed, in other words after all currently executing RCU
3651  * read-side critical sections have completed.  Note, however, that
3652  * upon return from synchronize_rcu(), the caller might well be executing
3653  * concurrently with new RCU read-side critical sections that began while
3654  * synchronize_rcu() was waiting.  RCU read-side critical sections are
3655  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3656  * In addition, regions of code across which interrupts, preemption, or
3657  * softirqs have been disabled also serve as RCU read-side critical
3658  * sections.  This includes hardware interrupt handlers, softirq handlers,
3659  * and NMI handlers.
3660  *
3661  * Note that this guarantee implies further memory-ordering guarantees.
3662  * On systems with more than one CPU, when synchronize_rcu() returns,
3663  * each CPU is guaranteed to have executed a full memory barrier since
3664  * the end of its last RCU read-side critical section whose beginning
3665  * preceded the call to synchronize_rcu().  In addition, each CPU having
3666  * an RCU read-side critical section that extends beyond the return from
3667  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3668  * after the beginning of synchronize_rcu() and before the beginning of
3669  * that RCU read-side critical section.  Note that these guarantees include
3670  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3671  * that are executing in the kernel.
3672  *
3673  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3674  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3675  * to have executed a full memory barrier during the execution of
3676  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3677  * again only if the system has more than one CPU).
3678  */
3679 void synchronize_rcu(void)
3680 {
3681 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3682 			 lock_is_held(&rcu_lock_map) ||
3683 			 lock_is_held(&rcu_sched_lock_map),
3684 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3685 	if (rcu_blocking_is_gp())
3686 		return;
3687 	if (rcu_gp_is_expedited())
3688 		synchronize_rcu_expedited();
3689 	else
3690 		wait_rcu_gp(call_rcu);
3691 }
3692 EXPORT_SYMBOL_GPL(synchronize_rcu);
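/*
 * A minimal update-side sketch (the "gp" pointer, its lock, and "struct foo"
 * are hypothetical): publish the new version, wait for pre-existing readers
 * with synchronize_rcu(), then free the old version directly instead of
 * deferring the free with call_rcu().
 *
 *	struct foo *old;
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *
 *	synchronize_rcu();	// All readers of "old" have now finished.
 *	kfree(old);
 */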
3693 
3694 /**
3695  * get_state_synchronize_rcu - Snapshot current RCU state
3696  *
3697  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3698  * to determine whether or not a full grace period has elapsed in the
3699  * meantime.
3700  */
3701 unsigned long get_state_synchronize_rcu(void)
3702 {
3703 	/*
3704 	 * Any prior manipulation of RCU-protected data must happen
3705 	 * before the load from ->gp_seq.
3706 	 */
3707 	smp_mb();  /* ^^^ */
3708 	return rcu_seq_snap(&rcu_state.gp_seq);
3709 }
3710 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3711 
3712 /**
3713  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3714  *
3715  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3716  *
3717  * If a full RCU grace period has elapsed since the earlier call to
3718  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3719  * synchronize_rcu() to wait for a full grace period.
3720  *
3721  * Yes, this function does not take counter wrap into account.  But
3722  * counter wrap is harmless.  If the counter wraps, we have waited for
3723  * more than 2 billion grace periods (and way more on a 64-bit system!),
3724  * so waiting for one additional grace period should be just fine.
3725  */
3726 void cond_synchronize_rcu(unsigned long oldstate)
3727 {
3728 	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
3729 		synchronize_rcu();
3730 	else
3731 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3732 }
3733 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
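/*
 * Illustrative pairing of get_state_synchronize_rcu() and
 * cond_synchronize_rcu(); do_other_work() and old_ptr are hypothetical.
 * The idea is to snapshot the grace-period cookie early, do unrelated
 * work, and pay for a full synchronize_rcu() only if no grace period
 * happened to elapse in the meantime.
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();
 *	do_other_work();		// Hopefully spans a grace period.
 *	cond_synchronize_rcu(cookie);	// Often a no-op by this point.
 *	kfree(old_ptr);
 */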
3734 
3735 /*
3736  * Check to see if there is any immediate RCU-related work to be done by
3737  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3738  * in order of increasing expense: checks that can be carried out against
3739  * CPU-local state are performed first.  However, we must check for CPU
3740  * stalls first, else we might not get a chance.
3741  */
3742 static int rcu_pending(int user)
3743 {
3744 	bool gp_in_progress;
3745 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3746 	struct rcu_node *rnp = rdp->mynode;
3747 
3748 	lockdep_assert_irqs_disabled();
3749 
3750 	/* Check for CPU stalls, if enabled. */
3751 	check_cpu_stall(rdp);
3752 
3753 	/* Does this CPU need a deferred NOCB wakeup? */
3754 	if (rcu_nocb_need_deferred_wakeup(rdp))
3755 		return 1;
3756 
3757 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3758 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3759 		return 0;
3760 
3761 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3762 	gp_in_progress = rcu_gp_in_progress();
3763 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3764 		return 1;
3765 
3766 	/* Does this CPU have callbacks ready to invoke? */
3767 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
3768 		return 1;
3769 
3770 	/* Has RCU gone idle with this CPU needing another grace period? */
3771 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3772 	    (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
3773 	     !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
3774 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3775 		return 1;
3776 
3777 	/* Have RCU grace period completed or started?  */
3778 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3779 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3780 		return 1;
3781 
3782 	/* nothing to do */
3783 	return 0;
3784 }
3785 
3786 /*
3787  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3788  * the compiler is expected to optimize this away.
3789  */
3790 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3791 {
3792 	trace_rcu_barrier(rcu_state.name, s, cpu,
3793 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3794 }
3795 
3796 /*
3797  * RCU callback function for rcu_barrier().  If we are last, wake
3798  * up the task executing rcu_barrier().
3799  *
3800  * Note that the value of rcu_state.barrier_sequence must be captured
3801  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3802  * other CPUs might count the value down to zero before this CPU gets
3803  * around to invoking rcu_barrier_trace(), which might result in bogus
3804  * data from the next instance of rcu_barrier().
3805  */
3806 static void rcu_barrier_callback(struct rcu_head *rhp)
3807 {
3808 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3809 
3810 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3811 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3812 		complete(&rcu_state.barrier_completion);
3813 	} else {
3814 		rcu_barrier_trace(TPS("CB"), -1, s);
3815 	}
3816 }
3817 
3818 /*
3819  * Called with preemption disabled, and from cross-cpu IRQ context.
3820  */
3821 static void rcu_barrier_func(void *cpu_in)
3822 {
3823 	uintptr_t cpu = (uintptr_t)cpu_in;
3824 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3825 
3826 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3827 	rdp->barrier_head.func = rcu_barrier_callback;
3828 	debug_rcu_head_queue(&rdp->barrier_head);
3829 	rcu_nocb_lock(rdp);
3830 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3831 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3832 		atomic_inc(&rcu_state.barrier_cpu_count);
3833 	} else {
3834 		debug_rcu_head_unqueue(&rdp->barrier_head);
3835 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3836 				  rcu_state.barrier_sequence);
3837 	}
3838 	rcu_nocb_unlock(rdp);
3839 }
3840 
3841 /**
3842  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3843  *
3844  * Note that this primitive does not necessarily wait for an RCU grace period
3845  * to complete.  For example, if there are no RCU callbacks queued anywhere
3846  * in the system, then rcu_barrier() is within its rights to return
3847  * immediately, without waiting for anything, much less an RCU grace period.
3848  */
3849 void rcu_barrier(void)
3850 {
3851 	uintptr_t cpu;
3852 	struct rcu_data *rdp;
3853 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3854 
3855 	rcu_barrier_trace(TPS("Begin"), -1, s);
3856 
3857 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3858 	mutex_lock(&rcu_state.barrier_mutex);
3859 
3860 	/* Did someone else do our work for us? */
3861 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3862 		rcu_barrier_trace(TPS("EarlyExit"), -1,
3863 				  rcu_state.barrier_sequence);
3864 		smp_mb(); /* caller's subsequent code after above check. */
3865 		mutex_unlock(&rcu_state.barrier_mutex);
3866 		return;
3867 	}
3868 
3869 	/* Mark the start of the barrier operation. */
3870 	rcu_seq_start(&rcu_state.barrier_sequence);
3871 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3872 
3873 	/*
3874 	 * Initialize the count to two rather than to zero in order
3875 	 * to avoid a too-soon return to zero in case of an immediate
3876 	 * invocation of the just-enqueued callback (or preemption of
3877 	 * this task).  Exclude CPU-hotplug operations to ensure that no
3878 	 * offline non-offloaded CPU has callbacks queued.
3879 	 */
3880 	init_completion(&rcu_state.barrier_completion);
3881 	atomic_set(&rcu_state.barrier_cpu_count, 2);
3882 	get_online_cpus();
3883 
3884 	/*
3885 	 * Force each CPU with callbacks to register a new callback.
3886 	 * When that callback is invoked, we will know that all of the
3887 	 * corresponding CPU's preceding callbacks have been invoked.
3888 	 */
3889 	for_each_possible_cpu(cpu) {
3890 		rdp = per_cpu_ptr(&rcu_data, cpu);
3891 		if (cpu_is_offline(cpu) &&
3892 		    !rcu_segcblist_is_offloaded(&rdp->cblist))
3893 			continue;
3894 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3895 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
3896 					  rcu_state.barrier_sequence);
3897 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3898 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3899 			   cpu_is_offline(cpu)) {
3900 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3901 					  rcu_state.barrier_sequence);
3902 			local_irq_disable();
3903 			rcu_barrier_func((void *)cpu);
3904 			local_irq_enable();
3905 		} else if (cpu_is_offline(cpu)) {
3906 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3907 					  rcu_state.barrier_sequence);
3908 		} else {
3909 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3910 					  rcu_state.barrier_sequence);
3911 		}
3912 	}
3913 	put_online_cpus();
3914 
3915 	/*
3916 	 * Now that we have an rcu_barrier_callback() callback on each
3917 	 * CPU, and thus each counted, remove the initial count.
3918 	 */
3919 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3920 		complete(&rcu_state.barrier_completion);
3921 
3922 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3923 	wait_for_completion(&rcu_state.barrier_completion);
3924 
3925 	/* Mark the end of the barrier operation. */
3926 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3927 	rcu_seq_end(&rcu_state.barrier_sequence);
3928 
3929 	/* Other rcu_barrier() invocations can now safely proceed. */
3930 	mutex_unlock(&rcu_state.barrier_mutex);
3931 }
3932 EXPORT_SYMBOL_GPL(rcu_barrier);
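/*
 * Typical module-unload sketch (foo_exit() and foo_shutdown() are
 * hypothetical): first stop posting new callbacks, then use rcu_barrier()
 * to wait for all already-queued callbacks to be invoked before the
 * module text containing the callback function goes away.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_shutdown();		// Stop queueing new call_rcu() callbacks.
 *		rcu_barrier();		// Wait for the queued ones to finish.
 *		// Now it is safe to free remaining state and unload.
 *	}
 */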
3933 
3934 /*
3935  * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
3936  * first CPU in a given leaf rcu_node structure coming online.  The caller
3937  * must hold the corresponding leaf rcu_node ->lock with interrupts
3938  * disabled.
3939  */
3940 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3941 {
3942 	long mask;
3943 	long oldmask;
3944 	struct rcu_node *rnp = rnp_leaf;
3945 
3946 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
3947 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
3948 	for (;;) {
3949 		mask = rnp->grpmask;
3950 		rnp = rnp->parent;
3951 		if (rnp == NULL)
3952 			return;
3953 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3954 		oldmask = rnp->qsmaskinit;
3955 		rnp->qsmaskinit |= mask;
3956 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3957 		if (oldmask)
3958 			return;
3959 	}
3960 }
3961 
3962 /*
3963  * Do boot-time initialization of a CPU's per-CPU RCU data.
3964  */
3965 static void __init
3966 rcu_boot_init_percpu_data(int cpu)
3967 {
3968 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3969 
3970 	/* Set up local state, ensuring consistent view of global state. */
3971 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3972 	INIT_WORK(&rdp->strict_work, strict_work_handler);
3973 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
3974 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
3975 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3976 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3977 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3978 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3979 	rdp->cpu = cpu;
3980 	rcu_boot_init_nocb_percpu_data(rdp);
3981 }
3982 
3983 /*
3984  * Invoked early in the CPU-online process, when pretty much all services
3985  * are available.  The incoming CPU is not present.
3986  *
3987  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
3988  * offline event can be happening at a given time.  Note also that we can
3989  * accept some slop in the rsp->gp_seq access due to the fact that this
3990  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3991  * And any offloaded callbacks are being numbered elsewhere.
3992  */
3993 int rcutree_prepare_cpu(unsigned int cpu)
3994 {
3995 	unsigned long flags;
3996 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3997 	struct rcu_node *rnp = rcu_get_root();
3998 
3999 	/* Set up local state, ensuring consistent view of global state. */
4000 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4001 	rdp->qlen_last_fqs_check = 0;
4002 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4003 	rdp->blimit = blimit;
4004 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
4005 	    !rcu_segcblist_is_offloaded(&rdp->cblist))
4006 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4007 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4008 	rcu_dynticks_eqs_online();
4009 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4010 
4011 	/*
4012 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4013 	 * propagation up the rcu_node tree will happen at the beginning
4014 	 * of the next grace period.
4015 	 */
4016 	rnp = rdp->mynode;
4017 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4018 	rdp->beenonline = true;	 /* We have now been online. */
4019 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4020 	rdp->gp_seq_needed = rdp->gp_seq;
4021 	rdp->cpu_no_qs.b.norm = true;
4022 	rdp->core_needs_qs = false;
4023 	rdp->rcu_iw_pending = false;
4024 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4025 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4026 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4027 	rcu_prepare_kthreads(cpu);
4028 	rcu_spawn_cpu_nocb_kthread(cpu);
4029 
4030 	return 0;
4031 }
4032 
4033 /*
4034  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4035  */
4036 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4037 {
4038 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4039 
4040 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4041 }
4042 
4043 /*
4044  * Near the end of the CPU-online process.  Pretty much all services
4045  * enabled, and the CPU is now very much alive.
4046  */
4047 int rcutree_online_cpu(unsigned int cpu)
4048 {
4049 	unsigned long flags;
4050 	struct rcu_data *rdp;
4051 	struct rcu_node *rnp;
4052 
4053 	rdp = per_cpu_ptr(&rcu_data, cpu);
4054 	rnp = rdp->mynode;
4055 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4056 	rnp->ffmask |= rdp->grpmask;
4057 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4058 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4059 		return 0; /* Too early in boot for scheduler work. */
4060 	sync_sched_exp_online_cleanup(cpu);
4061 	rcutree_affinity_setting(cpu, -1);
4062 
4063 	// Stop-machine done, so allow nohz_full to disable tick.
4064 	tick_dep_clear(TICK_DEP_BIT_RCU);
4065 	return 0;
4066 }
4067 
4068 /*
4069  * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4070  * with pretty much all services enabled.
4071  */
4072 int rcutree_offline_cpu(unsigned int cpu)
4073 {
4074 	unsigned long flags;
4075 	struct rcu_data *rdp;
4076 	struct rcu_node *rnp;
4077 
4078 	rdp = per_cpu_ptr(&rcu_data, cpu);
4079 	rnp = rdp->mynode;
4080 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4081 	rnp->ffmask &= ~rdp->grpmask;
4082 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4083 
4084 	rcutree_affinity_setting(cpu, cpu);
4085 
4086 	// nohz_full CPUs need the tick for stop-machine to work quickly
4087 	tick_dep_set(TICK_DEP_BIT_RCU);
4088 	return 0;
4089 }
4090 
4091 /*
4092  * Mark the specified CPU as being online so that subsequent grace periods
4093  * (both expedited and normal) will wait on it.  Note that this means that
4094  * incoming CPUs are not allowed to use RCU read-side critical sections
4095  * until this function is called.  Failing to observe this restriction
4096  * will result in lockdep splats.
4097  *
4098  * Note that this function is special in that it is invoked directly
4099  * from the incoming CPU rather than from the cpuhp_step mechanism.
4100  * This is because this function must be invoked at a precise location.
4101  */
4102 void rcu_cpu_starting(unsigned int cpu)
4103 {
4104 	unsigned long flags;
4105 	unsigned long mask;
4106 	struct rcu_data *rdp;
4107 	struct rcu_node *rnp;
4108 	bool newcpu;
4109 
4110 	rdp = per_cpu_ptr(&rcu_data, cpu);
4111 	if (rdp->cpu_started)
4112 		return;
4113 	rdp->cpu_started = true;
4114 
4115 	rnp = rdp->mynode;
4116 	mask = rdp->grpmask;
4117 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4118 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4119 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4120 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4121 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4122 	newcpu = !(rnp->expmaskinitnext & mask);
4123 	rnp->expmaskinitnext |= mask;
4124 	/* Allow lockless access for expedited grace periods. */
4125 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4126 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4127 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4128 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4129 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4130 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
4131 		rcu_disable_urgency_upon_qs(rdp);
4132 		/* Report QS -after- changing ->qsmaskinitnext! */
4133 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4134 	} else {
4135 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4136 	}
4137 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4138 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4139 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4140 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4141 }
4142 
4143 /*
4144  * The outgoing CPU has no further need of RCU, so remove it from
4145  * the rcu_node tree's ->qsmaskinitnext bit masks.
4146  *
4147  * Note that this function is special in that it is invoked directly
4148  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4149  * This is because this function must be invoked at a precise location.
4150  */
4151 void rcu_report_dead(unsigned int cpu)
4152 {
4153 	unsigned long flags;
4154 	unsigned long mask;
4155 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4156 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4157 
4158 	/* QS for any half-done expedited grace period. */
4159 	preempt_disable();
4160 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4161 	preempt_enable();
4162 	rcu_preempt_deferred_qs(current);
4163 
4164 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4165 	mask = rdp->grpmask;
4166 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4167 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4168 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4169 	raw_spin_lock(&rcu_state.ofl_lock);
4170 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4171 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4172 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4173 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4174 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4175 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4176 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4177 	}
4178 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4179 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4180 	raw_spin_unlock(&rcu_state.ofl_lock);
4181 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4182 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4183 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4184 
4185 	rdp->cpu_started = false;
4186 }
4187 
4188 #ifdef CONFIG_HOTPLUG_CPU
4189 /*
4190  * The outgoing CPU has just passed through the dying-idle state, and we
4191  * are being invoked from the CPU that was IPIed to continue the offline
4192  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4193  */
4194 void rcutree_migrate_callbacks(int cpu)
4195 {
4196 	unsigned long flags;
4197 	struct rcu_data *my_rdp;
4198 	struct rcu_node *my_rnp;
4199 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4200 	bool needwake;
4201 
4202 	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4203 	    rcu_segcblist_empty(&rdp->cblist))
4204 		return;  /* No callbacks to migrate. */
4205 
4206 	local_irq_save(flags);
4207 	my_rdp = this_cpu_ptr(&rcu_data);
4208 	my_rnp = my_rdp->mynode;
4209 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4210 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4211 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4212 	/* Leverage recent GPs and set GP for new callbacks. */
4213 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4214 		   rcu_advance_cbs(my_rnp, my_rdp);
4215 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4216 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4217 	rcu_segcblist_disable(&rdp->cblist);
4218 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4219 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4220 	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4221 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4222 		__call_rcu_nocb_wake(my_rdp, true, flags);
4223 	} else {
4224 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4225 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4226 	}
4227 	if (needwake)
4228 		rcu_gp_kthread_wake();
4229 	lockdep_assert_irqs_enabled();
4230 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4231 		  !rcu_segcblist_empty(&rdp->cblist),
4232 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4233 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4234 		  rcu_segcblist_first_cb(&rdp->cblist));
4235 }
4236 #endif
4237 
4238 /*
4239  * On non-huge systems, use expedited RCU grace periods to make suspend
4240  * and hibernation run faster.
4241  */
4242 static int rcu_pm_notify(struct notifier_block *self,
4243 			 unsigned long action, void *hcpu)
4244 {
4245 	switch (action) {
4246 	case PM_HIBERNATION_PREPARE:
4247 	case PM_SUSPEND_PREPARE:
4248 		rcu_expedite_gp();
4249 		break;
4250 	case PM_POST_HIBERNATION:
4251 	case PM_POST_SUSPEND:
4252 		rcu_unexpedite_gp();
4253 		break;
4254 	default:
4255 		break;
4256 	}
4257 	return NOTIFY_OK;
4258 }
4259 
4260 /*
4261  * Spawn the kthreads that handle RCU's grace periods.
4262  */
4263 static int __init rcu_spawn_gp_kthread(void)
4264 {
4265 	unsigned long flags;
4266 	int kthread_prio_in = kthread_prio;
4267 	struct rcu_node *rnp;
4268 	struct sched_param sp;
4269 	struct task_struct *t;
4270 
4271 	/* Force priority into range. */
4272 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4273 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4274 		kthread_prio = 2;
4275 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4276 		kthread_prio = 1;
4277 	else if (kthread_prio < 0)
4278 		kthread_prio = 0;
4279 	else if (kthread_prio > 99)
4280 		kthread_prio = 99;
4281 
4282 	if (kthread_prio != kthread_prio_in)
4283 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4284 			 kthread_prio, kthread_prio_in);
4285 
4286 	rcu_scheduler_fully_active = 1;
4287 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4288 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4289 		return 0;
4290 	if (kthread_prio) {
4291 		sp.sched_priority = kthread_prio;
4292 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4293 	}
4294 	rnp = rcu_get_root();
4295 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4296 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4297 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4298 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4299 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4300 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4301 	wake_up_process(t);
4302 	rcu_spawn_nocb_kthreads();
4303 	rcu_spawn_boost_kthreads();
4304 	rcu_spawn_core_kthreads();
4305 	return 0;
4306 }
4307 early_initcall(rcu_spawn_gp_kthread);
4308 
4309 /*
4310  * This function is invoked towards the end of the scheduler's
4311  * initialization process.  Before this is called, the idle task might
4312  * contain synchronous grace-period primitives (during which time, this idle
4313  * task is booting the system, and such primitives are no-ops).  After this
4314  * function is called, any synchronous grace-period primitives are run as
4315  * expedited, with the requesting task driving the grace period forward.
4316  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4317  * runtime RCU functionality.
4318  */
4319 void rcu_scheduler_starting(void)
4320 {
4321 	WARN_ON(num_online_cpus() != 1);
4322 	WARN_ON(nr_context_switches() > 0);
4323 	rcu_test_sync_prims();
4324 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4325 	rcu_test_sync_prims();
4326 }
4327 
4328 /*
4329  * Helper function for rcu_init() that initializes the rcu_state structure.
4330  */
4331 static void __init rcu_init_one(void)
4332 {
4333 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4334 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4335 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4336 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4337 
4338 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4339 	int cpustride = 1;
4340 	int i;
4341 	int j;
4342 	struct rcu_node *rnp;
4343 
4344 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4345 
4346 	/* Silence gcc 4.8 false positive about array index out of range. */
4347 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4348 		panic("rcu_init_one: rcu_num_lvls out of range");
4349 
4350 	/* Initialize the level-tracking arrays. */
4351 
4352 	for (i = 1; i < rcu_num_lvls; i++)
4353 		rcu_state.level[i] =
4354 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4355 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4356 
4357 	/* Initialize the elements themselves, starting from the leaves. */
4358 
4359 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4360 		cpustride *= levelspread[i];
4361 		rnp = rcu_state.level[i];
4362 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4363 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4364 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4365 						   &rcu_node_class[i], buf[i]);
4366 			raw_spin_lock_init(&rnp->fqslock);
4367 			lockdep_set_class_and_name(&rnp->fqslock,
4368 						   &rcu_fqs_class[i], fqs[i]);
4369 			rnp->gp_seq = rcu_state.gp_seq;
4370 			rnp->gp_seq_needed = rcu_state.gp_seq;
4371 			rnp->completedqs = rcu_state.gp_seq;
4372 			rnp->qsmask = 0;
4373 			rnp->qsmaskinit = 0;
4374 			rnp->grplo = j * cpustride;
4375 			rnp->grphi = (j + 1) * cpustride - 1;
4376 			if (rnp->grphi >= nr_cpu_ids)
4377 				rnp->grphi = nr_cpu_ids - 1;
4378 			if (i == 0) {
4379 				rnp->grpnum = 0;
4380 				rnp->grpmask = 0;
4381 				rnp->parent = NULL;
4382 			} else {
4383 				rnp->grpnum = j % levelspread[i - 1];
4384 				rnp->grpmask = BIT(rnp->grpnum);
4385 				rnp->parent = rcu_state.level[i - 1] +
4386 					      j / levelspread[i - 1];
4387 			}
4388 			rnp->level = i;
4389 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4390 			rcu_init_one_nocb(rnp);
4391 			init_waitqueue_head(&rnp->exp_wq[0]);
4392 			init_waitqueue_head(&rnp->exp_wq[1]);
4393 			init_waitqueue_head(&rnp->exp_wq[2]);
4394 			init_waitqueue_head(&rnp->exp_wq[3]);
4395 			spin_lock_init(&rnp->exp_lock);
4396 		}
4397 	}
4398 
4399 	init_swait_queue_head(&rcu_state.gp_wq);
4400 	init_swait_queue_head(&rcu_state.expedited_wq);
4401 	rnp = rcu_first_leaf_node();
4402 	for_each_possible_cpu(i) {
4403 		while (i > rnp->grphi)
4404 			rnp++;
4405 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4406 		rcu_boot_init_percpu_data(i);
4407 	}
4408 }
4409 
4410 /*
4411  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4412  * replace the definitions in tree.h because those are needed to size
4413  * the ->node array in the rcu_state structure.
4414  */
4415 void rcu_init_geometry(void)
4416 {
4417 	ulong d;
4418 	int i;
4419 	static unsigned long old_nr_cpu_ids;
4420 	int rcu_capacity[RCU_NUM_LVLS];
4421 	static bool initialized;
4422 
4423 	if (initialized) {
4424 		/*
4425 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4426 		 * unless nr_cpus_ids == NR_CPUS, in which case who cares?
4427 		 */
4428 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4429 		return;
4430 	}
4431 
4432 	old_nr_cpu_ids = nr_cpu_ids;
4433 	initialized = true;
4434 
4435 	/*
4436 	 * Initialize any unspecified boot parameters.
4437 	 * The default values of jiffies_till_first_fqs and
4438 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4439 	 * value, which is a function of HZ, then adding one for each
4440 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4441 	 */
4442 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4443 	if (jiffies_till_first_fqs == ULONG_MAX)
4444 		jiffies_till_first_fqs = d;
4445 	if (jiffies_till_next_fqs == ULONG_MAX)
4446 		jiffies_till_next_fqs = d;
4447 	adjust_jiffies_till_sched_qs();
4448 
4449 	/* If the compile-time values are accurate, just leave. */
4450 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4451 	    nr_cpu_ids == NR_CPUS)
4452 		return;
4453 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4454 		rcu_fanout_leaf, nr_cpu_ids);
4455 
4456 	/*
4457 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4458 	 * and cannot exceed the number of bits in the rcu_node masks.
4459 	 * Complain and fall back to the compile-time values if this
4460 	 * limit is exceeded.
4461 	 */
4462 	if (rcu_fanout_leaf < 2 ||
4463 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4464 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4465 		WARN_ON(1);
4466 		return;
4467 	}
4468 
4469 	/*
4470 	 * Compute number of nodes that can be handled an rcu_node tree
4471 	 * with the given number of levels.
4472 	 */
4473 	rcu_capacity[0] = rcu_fanout_leaf;
4474 	for (i = 1; i < RCU_NUM_LVLS; i++)
4475 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4476 
4477 	/*
4478 	 * The tree must be able to accommodate the configured number of CPUs.
4479 	 * If this limit is exceeded, fall back to the compile-time values.
4480 	 */
4481 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4482 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4483 		WARN_ON(1);
4484 		return;
4485 	}
4486 
4487 	/* Calculate the number of levels in the tree. */
4488 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4489 	}
4490 	rcu_num_lvls = i + 1;
4491 
4492 	/* Calculate the number of rcu_nodes at each level of the tree. */
4493 	for (i = 0; i < rcu_num_lvls; i++) {
4494 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4495 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4496 	}
4497 
4498 	/* Calculate the total number of rcu_node structures. */
4499 	rcu_num_nodes = 0;
4500 	for (i = 0; i < rcu_num_lvls; i++)
4501 		rcu_num_nodes += num_rcu_lvl[i];
4502 }
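
/*
 * Worked example (assumed values, for illustration only): with the common
 * defaults of rcu_fanout_leaf == 16 and RCU_FANOUT == 64 on a 64-bit build,
 * a system booted with nr_cpu_ids == 96 (and NR_CPUS != 96) gives:
 *
 *	rcu_capacity[0] = 16, rcu_capacity[1] = 1024, ...
 *	96 > 16 but 96 <= 1024, so rcu_num_lvls = 2
 *	num_rcu_lvl[0] = DIV_ROUND_UP(96, 1024) = 1	(root level)
 *	num_rcu_lvl[1] = DIV_ROUND_UP(96, 16)   = 6	(leaf level)
 *	rcu_num_nodes = 7
 */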

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}
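
/*
 * For the hypothetical 96-CPU, two-level geometry sketched above, booting
 * with rcutree.dump_tree=1 would produce console output along these lines
 * (one line per tree level, "grplo:grphi ^grpnum" per node):
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:95 ^0
 *	rcu:  0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4  80:95 ^5
 */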

struct workqueue_struct *rcu_gp_wq;
struct workqueue_struct *rcu_par_gp_wq;

static void __init kfree_rcu_batch_init(void)
{
	int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		for (i = 0; i < KFREE_N_BATCHES; i++) {
			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
			krcp->krw_arr[i].krcp = krcp;
		}

		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
		INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
		krcp->initialized = true;
	}
	if (register_shrinker(&kfree_rcu_shrinker))
		pr_err("Failed to register kfree_rcu() shrinker!\n");
}
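
/*
 * Note: each possible CPU thus gets KFREE_N_BATCHES rcu_work batches, a
 * delayed monitor work item, and a page-cache refill work item.  The
 * shrinker registration presumably lets memory pressure prompt earlier
 * flushing of the per-CPU kfree_rcu() backlog.
 */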

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	kfree_rcu_batch_init();
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}

	/* Create workqueue for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_par_gp_wq);
	srcu_init();

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;
}
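
/*
 * Example (assuming the usual defaults of qhimark == 10000 and
 * DEFAULT_RCU_QOVLD_MULT == 2): leaving rcutree.qovld at its negative
 * default yields qovld_calc == 20000, while an explicit value such as
 * rcutree.qovld=5000 is used as-is.
 */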

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"