1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/moduleparam.h>
35 #include <linux/percpu.h>
36 #include <linux/notifier.h>
37 #include <linux/cpu.h>
38 #include <linux/mutex.h>
39 #include <linux/time.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/wait.h>
42 #include <linux/kthread.h>
43 #include <uapi/linux/sched/types.h>
44 #include <linux/prefetch.h>
45 #include <linux/delay.h>
46 #include <linux/random.h>
47 #include <linux/trace_events.h>
48 #include <linux/suspend.h>
49 #include <linux/ftrace.h>
50 #include <linux/tick.h>
51 #include <linux/sysrq.h>
52 #include <linux/kprobes.h>
53 #include <linux/gfp.h>
54 #include <linux/oom.h>
55 #include <linux/smpboot.h>
56 #include <linux/jiffies.h>
57 #include <linux/slab.h>
58 #include <linux/sched/isolation.h>
59 #include <linux/sched/clock.h>
60 #include <linux/vmalloc.h>
61 #include <linux/mm.h>
62 #include <linux/kasan.h>
63 #include "../time/tick-internal.h"
64 
65 #include "tree.h"
66 #include "rcu.h"
67 
68 #ifdef MODULE_PARAM_PREFIX
69 #undef MODULE_PARAM_PREFIX
70 #endif
71 #define MODULE_PARAM_PREFIX "rcutree."
72 
73 /* Data structures. */
74 
75 /*
76  * Steal a bit from the bottom of ->dynticks for idle entry/exit
77  * control.  Initially this is for TLB flushing.
78  */
79 #define RCU_DYNTICK_CTRL_MASK 0x1
80 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
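/*
 * Illustrative sketch of the resulting bit layout (assuming the values
 * above, i.e. RCU_DYNTICK_CTRL_MASK == 0x1 and RCU_DYNTICK_CTRL_CTR == 0x2):
 *
 *	->dynticks == 0b..010	initial value (RCU_DYNTICK_CTRL_CTR): watching
 *	+ CTRL_CTR == 0b..100	rcu_dynticks_eqs_enter(): bit 1 clear -> in EQS
 *	+ CTRL_CTR == 0b..110	rcu_dynticks_eqs_exit():  bit 1 set   -> watching
 *
 * Bit 0 (RCU_DYNTICK_CTRL_MASK) is the special-action request bit set by
 * rcu_eqs_special_set() and cleared on the next rcu_dynticks_eqs_exit(),
 * while the remaining bits form a counter advancing in steps of
 * RCU_DYNTICK_CTRL_CTR.
 */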
81 
82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
83 	.dynticks_nesting = 1,
84 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
85 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
86 };
87 static struct rcu_state rcu_state = {
88 	.level = { &rcu_state.node[0] },
89 	.gp_state = RCU_GP_IDLE,
90 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
91 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
92 	.name = RCU_NAME,
93 	.abbr = RCU_ABBR,
94 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
95 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
96 	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
97 };
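/*
 * For illustration: assuming RCU_SEQ_CTR_SHIFT == 2, the ->gp_seq
 * initializer above works out to ULONG_MAX - 1199, i.e. roughly 300
 * grace periods short of counter wrap.  Starting this close to the wrap
 * point exercises the sequence-number wrap handling (for example,
 * rcu_gpnum_ovf() later in this file) soon after boot rather than only
 * after a very long uptime.
 */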
98 
99 /* Dump rcu_node combining tree at boot to verify correct setup. */
100 static bool dump_tree;
101 module_param(dump_tree, bool, 0444);
102 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
103 static bool use_softirq = true;
104 module_param(use_softirq, bool, 0444);
105 /* Control rcu_node-tree auto-balancing at boot time. */
106 static bool rcu_fanout_exact;
107 module_param(rcu_fanout_exact, bool, 0444);
108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 module_param(rcu_fanout_leaf, int, 0444);
111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 /* Number of rcu_nodes at specified level. */
113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
115 
116 /*
117  * The rcu_scheduler_active variable is initialized to the value
118  * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
119  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
120  * RCU can assume that there is but one task, allowing RCU to (for example)
121  * optimize synchronize_rcu() to a simple barrier().  When this variable
122  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123  * to detect real grace periods.  This variable is also used to suppress
124  * boot-time false positives from lockdep-RCU error checking.  Finally, it
125  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126  * is fully initialized, including all of its kthreads having been spawned.
127  */
128 int rcu_scheduler_active __read_mostly;
129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
130 
131 /*
132  * The rcu_scheduler_fully_active variable transitions from zero to one
133  * during the early_initcall() processing, which is after the scheduler
134  * is capable of creating new tasks.  So RCU processing (for example,
135  * creating tasks for RCU priority boosting) must be delayed until after
136  * rcu_scheduler_fully_active transitions from zero to one.  We also
137  * currently delay invocation of any RCU callbacks until after this point.
138  *
139  * It might later prove better for people registering RCU callbacks during
140  * early boot to take responsibility for these callbacks, but one step at
141  * a time.
142  */
143 static int rcu_scheduler_fully_active __read_mostly;
144 
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 			      unsigned long gps, unsigned long flags);
147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150 static void invoke_rcu_core(void);
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
152 static void sync_sched_exp_online_cleanup(int cpu);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 
155 /* rcuc/rcub kthread realtime priority */
156 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
157 module_param(kthread_prio, int, 0444);
158 
159 /* Delay in jiffies for grace-period initialization delays, debug only. */
160 
161 static int gp_preinit_delay;
162 module_param(gp_preinit_delay, int, 0444);
163 static int gp_init_delay;
164 module_param(gp_init_delay, int, 0444);
165 static int gp_cleanup_delay;
166 module_param(gp_cleanup_delay, int, 0444);
167 
168 // Add delay to rcu_read_unlock() for strict grace periods.
169 static int rcu_unlock_delay;
170 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
171 module_param(rcu_unlock_delay, int, 0444);
172 #endif
173 
174 /*
175  * This rcu parameter is runtime-read-only. It reflects
176  * a minimum allowed number of objects which can be cached
177  * per-CPU. Object size is equal to one page. This value
178  * can be changed at boot time.
179  */
180 static int rcu_min_cached_objs = 5;
181 module_param(rcu_min_cached_objs, int, 0444);
182 
183 /* Retrieve RCU kthreads priority for rcutorture */
184 int rcu_get_gp_kthreads_prio(void)
185 {
186 	return kthread_prio;
187 }
188 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
189 
190 /*
191  * Number of grace periods between delays, normalized by the duration of
192  * the delay.  The longer the delay, the more the grace periods between
193  * each delay.  The reason for this normalization is that it means that,
194  * for non-zero delays, the overall slowdown of grace periods is constant
195  * regardless of the duration of the delay.  This arrangement balances
196  * the need for long delays to increase some race probabilities with the
197  * need for fast grace periods to increase other race probabilities.
198  */
199 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
200 
201 /*
202  * Compute the mask of online CPUs for the specified rcu_node structure.
203  * This will not be stable unless the rcu_node structure's ->lock is
204  * held, but the bit corresponding to the current CPU will be stable
205  * in most contexts.
206  */
207 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
208 {
209 	return READ_ONCE(rnp->qsmaskinitnext);
210 }
211 
212 /*
213  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
214  * permit this function to be invoked without holding the root rcu_node
215  * structure's ->lock, but of course results can be subject to change.
216  */
217 static int rcu_gp_in_progress(void)
218 {
219 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
220 }
221 
222 /*
223  * Return the number of callbacks queued on the specified CPU.
224  * Handles both the nocbs and normal cases.
225  */
226 static long rcu_get_n_cbs_cpu(int cpu)
227 {
228 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
229 
230 	if (rcu_segcblist_is_enabled(&rdp->cblist))
231 		return rcu_segcblist_n_cbs(&rdp->cblist);
232 	return 0;
233 }
234 
235 void rcu_softirq_qs(void)
236 {
237 	rcu_qs();
238 	rcu_preempt_deferred_qs(current);
239 }
240 
241 /*
242  * Record entry into an extended quiescent state.  This is only to be
243  * called when not already in an extended quiescent state, that is,
244  * RCU is watching prior to the call to this function and is no longer
245  * watching upon return.
246  */
247 static noinstr void rcu_dynticks_eqs_enter(void)
248 {
249 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
250 	int seq;
251 
252 	/*
253 	 * CPUs seeing atomic_add_return() must see prior RCU read-side
254 	 * critical sections, and we also must force ordering with the
255 	 * next idle sojourn.
256 	 */
257 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
258 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
259 	// RCU is no longer watching.  Better be in extended quiescent state!
260 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
261 		     (seq & RCU_DYNTICK_CTRL_CTR));
262 	/* Better not have special action (TLB flush) pending! */
263 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
264 		     (seq & RCU_DYNTICK_CTRL_MASK));
265 }
266 
267 /*
268  * Record exit from an extended quiescent state.  This is only to be
269  * called from an extended quiescent state, that is, RCU is not watching
270  * prior to the call to this function and is watching upon return.
271  */
272 static noinstr void rcu_dynticks_eqs_exit(void)
273 {
274 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
275 	int seq;
276 
277 	/*
278 	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
279 	 * and we also must force ordering with the next RCU read-side
280 	 * critical section.
281 	 */
282 	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
283 	// RCU is now watching.  Better not be in an extended quiescent state!
284 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
285 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
286 		     !(seq & RCU_DYNTICK_CTRL_CTR));
287 	if (seq & RCU_DYNTICK_CTRL_MASK) {
288 		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
289 		smp_mb__after_atomic(); /* _exit after clearing mask. */
290 	}
291 }
292 
293 /*
294  * Reset the current CPU's ->dynticks counter to indicate that the
295  * newly onlined CPU is no longer in an extended quiescent state.
296  * This will either leave the counter unchanged, or increment it
297  * to the next non-quiescent value.
298  *
299  * The non-atomic test/increment sequence works because the upper bits
300  * of the ->dynticks counter are manipulated only by the corresponding CPU,
301  * or when the corresponding CPU is offline.
302  */
303 static void rcu_dynticks_eqs_online(void)
304 {
305 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
306 
307 	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
308 		return;
309 	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
310 }
311 
312 /*
313  * Is the current CPU in an extended quiescent state?
314  *
315  * No ordering, as we are sampling CPU-local information.
316  */
317 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
318 {
319 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
320 
321 	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
322 }
323 
324 /*
325  * Snapshot the ->dynticks counter with full ordering so as to allow
326  * stable comparison of this counter with past and future snapshots.
327  */
328 static int rcu_dynticks_snap(struct rcu_data *rdp)
329 {
330 	int snap = atomic_add_return(0, &rdp->dynticks);
331 
332 	return snap & ~RCU_DYNTICK_CTRL_MASK;
333 }
334 
335 /*
336  * Return true if the snapshot returned from rcu_dynticks_snap()
337  * indicates that RCU is in an extended quiescent state.
338  */
339 static bool rcu_dynticks_in_eqs(int snap)
340 {
341 	return !(snap & RCU_DYNTICK_CTRL_CTR);
342 }
343 
344 /*
345  * Return true if the CPU corresponding to the specified rcu_data
346  * structure has spent some time in an extended quiescent state since
347  * rcu_dynticks_snap() returned the specified snapshot.
348  */
349 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
350 {
351 	return snap != rcu_dynticks_snap(rdp);
352 }
353 
354 /*
355  * Return true if the referenced integer is zero while the specified
356  * CPU remains within a single extended quiescent state.
357  */
358 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
359 {
360 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
361 	int snap;
362 
363 	// If not quiescent, force back to earlier extended quiescent state.
364 	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
365 					       RCU_DYNTICK_CTRL_CTR);
366 
367 	smp_rmb(); // Order ->dynticks and *vp reads.
368 	if (READ_ONCE(*vp))
369 		return false;  // Non-zero, so report failure;
370 	smp_rmb(); // Order *vp read and ->dynticks re-read.
371 
372 	// If still in the same extended quiescent state, we are good!
373 	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
374 }
375 
376 /*
377  * Set the special (bottom) bit of the specified CPU so that it
378  * will take special action (such as flushing its TLB) on the
379  * next exit from an extended quiescent state.  Returns true if
380  * the bit was successfully set, or false if the CPU was not in
381  * an extended quiescent state.
382  */
383 bool rcu_eqs_special_set(int cpu)
384 {
385 	int old;
386 	int new;
387 	int new_old;
388 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
389 
390 	new_old = atomic_read(&rdp->dynticks);
391 	do {
392 		old = new_old;
393 		if (old & RCU_DYNTICK_CTRL_CTR)
394 			return false;
395 		new = old | RCU_DYNTICK_CTRL_MASK;
396 		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
397 	} while (new_old != old);
398 	return true;
399 }
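/*
 * Sketch of a hypothetical caller (for example, a deferred TLB shootdown,
 * the use case mentioned in the header comment above):
 *
 *	if (rcu_eqs_special_set(cpu)) {
 *		// CPU is idle; it will notice the bit and flush its TLB
 *		// on its next exit from the extended quiescent state.
 *	} else {
 *		// CPU is not idle; fall back to an IPI-based flush.
 *		smp_call_function_single(cpu, flush_fn, NULL, 1);
 *	}
 *
 * Here flush_fn is a hypothetical callback supplied by the caller;
 * smp_call_function_single() is the usual IPI mechanism.
 */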
400 
401 /*
402  * Let the RCU core know that this CPU has gone through the scheduler,
403  * which is a quiescent state.  This is called when the need for a
404  * quiescent state is urgent, so we burn an atomic operation and full
405  * memory barriers to let the RCU core know about it, regardless of what
406  * this CPU might (or might not) do in the near future.
407  *
408  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
409  *
410  * The caller must have disabled interrupts and must not be idle.
411  */
412 notrace void rcu_momentary_dyntick_idle(void)
413 {
414 	int special;
415 
416 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
417 	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
418 				    &this_cpu_ptr(&rcu_data)->dynticks);
419 	/* It is illegal to call this from idle state. */
420 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
421 	rcu_preempt_deferred_qs(current);
422 }
423 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
424 
425 /**
426  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
427  *
428  * If the current CPU is idle and running either at a first-level (not
429  * nested) interrupt or directly from the idle loop, return true.
430  *
431  * The caller must have at least disabled IRQs.
432  */
433 static int rcu_is_cpu_rrupt_from_idle(void)
434 {
435 	long nesting;
436 
437 	/*
438 	 * Usually called from the tick; but also used from smp_call_function()
439 	 * for expedited grace periods. This latter can result in running from
440 	 * the idle task, instead of an actual IPI.
441 	 */
442 	lockdep_assert_irqs_disabled();
443 
444 	/* Check for counter underflows */
445 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
446 			 "RCU dynticks_nesting counter underflow!");
447 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
448 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
449 
450 	/* Are we at first interrupt nesting level? */
451 	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
452 	if (nesting > 1)
453 		return false;
454 
455 	/*
456 	 * If we're not in an interrupt, we must be in the idle task!
457 	 */
458 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
459 
460 	/* Does CPU appear to be idle from an RCU standpoint? */
461 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
462 }
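/*
 * Worked example (values as maintained by rcu_nmi_enter()/rcu_nmi_exit()):
 * on a CPU sitting in the idle loop, ->dynticks_nesting is 0.  The first
 * interrupt taken from idle sets ->dynticks_nmi_nesting to 1, so the checks
 * above yield true.  A nested interrupt or NMI raises it to 3 (steps of 2),
 * so nesting > 1 and the function returns false.  In ordinary non-idle
 * process context ->dynticks_nesting is nonzero, so the final test also
 * returns false.
 */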
463 
464 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
465 				// Maximum callbacks per rcu_do_batch ...
466 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
467 static long blimit = DEFAULT_RCU_BLIMIT;
468 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
469 static long qhimark = DEFAULT_RCU_QHIMARK;
470 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
471 static long qlowmark = DEFAULT_RCU_QLOMARK;
472 #define DEFAULT_RCU_QOVLD_MULT 2
473 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
474 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
475 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
476 
477 module_param(blimit, long, 0444);
478 module_param(qhimark, long, 0444);
479 module_param(qlowmark, long, 0444);
480 module_param(qovld, long, 0444);
481 
482 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
483 static ulong jiffies_till_next_fqs = ULONG_MAX;
484 static bool rcu_kick_kthreads;
485 static int rcu_divisor = 7;
486 module_param(rcu_divisor, int, 0644);
487 
488 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
489 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
490 module_param(rcu_resched_ns, long, 0644);
491 
492 /*
493  * How long the grace period must be before we start recruiting
494  * quiescent-state help from rcu_note_context_switch().
495  */
496 static ulong jiffies_till_sched_qs = ULONG_MAX;
497 module_param(jiffies_till_sched_qs, ulong, 0444);
498 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
499 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
500 
501 /*
502  * Make sure that we give the grace-period kthread time to detect any
503  * idle CPUs before taking active measures to force quiescent states.
504  * However, don't go below 100 milliseconds, adjusted upwards for really
505  * large systems.
506  */
507 static void adjust_jiffies_till_sched_qs(void)
508 {
509 	unsigned long j;
510 
511 	/* If jiffies_till_sched_qs was specified, respect the request. */
512 	if (jiffies_till_sched_qs != ULONG_MAX) {
513 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
514 		return;
515 	}
516 	/* Otherwise, set to third fqs scan, but bound below on large system. */
517 	j = READ_ONCE(jiffies_till_first_fqs) +
518 		      2 * READ_ONCE(jiffies_till_next_fqs);
519 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
520 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
521 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
522 	WRITE_ONCE(jiffies_to_sched_qs, j);
523 }
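/*
 * Example of the calculation above: with jiffies_till_first_fqs == 1 and
 * jiffies_till_next_fqs == 1, j starts out as 1 + 2 * 1 = 3.  On a HZ=250
 * system with 64 CPUs (and assuming RCU_JIFFIES_FQS_DIV == 256), the lower
 * bound is 250 / 10 + 64 / 256 = 25 jiffies, so jiffies_to_sched_qs ends
 * up as 25, which is the roughly 100 milliseconds mentioned above.
 */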
524 
525 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
526 {
527 	ulong j;
528 	int ret = kstrtoul(val, 0, &j);
529 
530 	if (!ret) {
531 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
532 		adjust_jiffies_till_sched_qs();
533 	}
534 	return ret;
535 }
536 
537 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
538 {
539 	ulong j;
540 	int ret = kstrtoul(val, 0, &j);
541 
542 	if (!ret) {
543 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
544 		adjust_jiffies_till_sched_qs();
545 	}
546 	return ret;
547 }
548 
549 static struct kernel_param_ops first_fqs_jiffies_ops = {
550 	.set = param_set_first_fqs_jiffies,
551 	.get = param_get_ulong,
552 };
553 
554 static struct kernel_param_ops next_fqs_jiffies_ops = {
555 	.set = param_set_next_fqs_jiffies,
556 	.get = param_get_ulong,
557 };
558 
559 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
560 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
561 module_param(rcu_kick_kthreads, bool, 0644);
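/*
 * Because of the MODULE_PARAM_PREFIX definition near the top of this file,
 * the parameters above are exposed with an "rcutree." prefix.  For example,
 * on the kernel command line:
 *
 *	rcutree.jiffies_till_first_fqs=2 rcutree.rcu_kick_kthreads=1
 *
 * and the 0644 parameters can likewise be adjusted at run time under
 * /sys/module/rcutree/parameters/ (paths shown here for illustration).
 */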
562 
563 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
564 static int rcu_pending(int user);
565 
566 /*
567  * Return the number of RCU GPs completed thus far for debug & stats.
568  */
569 unsigned long rcu_get_gp_seq(void)
570 {
571 	return READ_ONCE(rcu_state.gp_seq);
572 }
573 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
574 
575 /*
576  * Return the number of RCU expedited batches completed thus far for
577  * debug & stats.  Odd numbers mean that a batch is in progress, even
578  * numbers mean idle.  The value returned will thus be roughly double
579  * the cumulative batches since boot.
580  */
581 unsigned long rcu_exp_batches_completed(void)
582 {
583 	return rcu_state.expedited_sequence;
584 }
585 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
586 
587 /*
588  * Return the root node of the rcu_state structure.
589  */
590 static struct rcu_node *rcu_get_root(void)
591 {
592 	return &rcu_state.node[0];
593 }
594 
595 /*
596  * Send along grace-period-related data for rcutorture diagnostics.
597  */
598 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
599 			    unsigned long *gp_seq)
600 {
601 	switch (test_type) {
602 	case RCU_FLAVOR:
603 		*flags = READ_ONCE(rcu_state.gp_flags);
604 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
605 		break;
606 	default:
607 		break;
608 	}
609 }
610 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
611 
612 /*
613  * Enter an RCU extended quiescent state, which can be either the
614  * idle loop or adaptive-tickless usermode execution.
615  *
616  * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
617  * the possibility of usermode upcalls having messed up our count
618  * of interrupt nesting level during the prior busy period.
619  */
620 static noinstr void rcu_eqs_enter(bool user)
621 {
622 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
623 
624 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
625 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
626 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
627 		     rdp->dynticks_nesting == 0);
628 	if (rdp->dynticks_nesting != 1) {
629 		// RCU will still be watching, so just do accounting and leave.
630 		rdp->dynticks_nesting--;
631 		return;
632 	}
633 
634 	lockdep_assert_irqs_disabled();
635 	instrumentation_begin();
636 	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
637 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
638 	rdp = this_cpu_ptr(&rcu_data);
639 	rcu_prepare_for_idle();
640 	rcu_preempt_deferred_qs(current);
641 
642 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
643 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
644 
645 	instrumentation_end();
646 	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
647 	// RCU is watching here ...
648 	rcu_dynticks_eqs_enter();
649 	// ... but is no longer watching here.
650 	rcu_dynticks_task_enter();
651 }
652 
653 /**
654  * rcu_idle_enter - inform RCU that current CPU is entering idle
655  *
656  * Enter idle mode, in other words, -leave- the mode in which RCU
657  * read-side critical sections can occur.  (Though RCU read-side
658  * critical sections can occur in irq handlers in idle, a possibility
659  * handled by irq_enter() and irq_exit().)
660  *
661  * If you add or remove a call to rcu_idle_enter(), be sure to test with
662  * CONFIG_RCU_EQS_DEBUG=y.
663  */
664 void rcu_idle_enter(void)
665 {
666 	lockdep_assert_irqs_disabled();
667 	rcu_eqs_enter(false);
668 }
669 EXPORT_SYMBOL_GPL(rcu_idle_enter);
670 
671 #ifdef CONFIG_NO_HZ_FULL
672 /**
673  * rcu_user_enter - inform RCU that we are resuming userspace.
674  *
675  * Enter RCU idle mode right before resuming userspace.  No use of RCU
676  * is permitted between this call and rcu_user_exit(). This way the
677  * CPU doesn't need to maintain the tick for RCU maintenance purposes
678  * when the CPU runs in userspace.
679  *
680  * If you add or remove a call to rcu_user_enter(), be sure to test with
681  * CONFIG_RCU_EQS_DEBUG=y.
682  */
683 noinstr void rcu_user_enter(void)
684 {
685 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
686 
687 	lockdep_assert_irqs_disabled();
688 
689 	instrumentation_begin();
690 	do_nocb_deferred_wakeup(rdp);
691 	instrumentation_end();
692 
693 	rcu_eqs_enter(true);
694 }
695 #endif /* CONFIG_NO_HZ_FULL */
696 
697 /**
698  * rcu_nmi_exit - inform RCU of exit from NMI context
699  *
700  * If we are returning from the outermost NMI handler that interrupted an
701  * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
702  * to let the RCU grace-period handling know that the CPU is back to
703  * being RCU-idle.
704  *
705  * If you add or remove a call to rcu_nmi_exit(), be sure to test
706  * with CONFIG_RCU_EQS_DEBUG=y.
707  */
708 noinstr void rcu_nmi_exit(void)
709 {
710 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
711 
712 	instrumentation_begin();
713 	/*
714 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
715 	 * (We are exiting an NMI handler, so RCU better be paying attention
716 	 * to us!)
717 	 */
718 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
719 	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
720 
721 	/*
722 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
723 	 * leave it in non-RCU-idle state.
724 	 */
725 	if (rdp->dynticks_nmi_nesting != 1) {
726 		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
727 				  atomic_read(&rdp->dynticks));
728 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
729 			   rdp->dynticks_nmi_nesting - 2);
730 		instrumentation_end();
731 		return;
732 	}
733 
734 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
735 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
736 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
737 
738 	if (!in_nmi())
739 		rcu_prepare_for_idle();
740 
741 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
742 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
743 	instrumentation_end();
744 
745 	// RCU is watching here ...
746 	rcu_dynticks_eqs_enter();
747 	// ... but is no longer watching here.
748 
749 	if (!in_nmi())
750 		rcu_dynticks_task_enter();
751 }
752 
753 /**
754  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
755  *
756  * Exit from an interrupt handler, which might possibly result in entering
757  * idle mode, in other words, leaving the mode in which read-side critical
758  * sections can occur.  The caller must have disabled interrupts.
759  *
760  * This code assumes that the idle loop never does anything that might
761  * result in unbalanced calls to irq_enter() and irq_exit().  If your
762  * architecture's idle loop violates this assumption, RCU will give you what
763  * you deserve, good and hard.  But very infrequently and irreproducibly.
764  *
765  * Use things like work queues to work around this limitation.
766  *
767  * You have been warned.
768  *
769  * If you add or remove a call to rcu_irq_exit(), be sure to test with
770  * CONFIG_RCU_EQS_DEBUG=y.
771  */
772 void noinstr rcu_irq_exit(void)
773 {
774 	lockdep_assert_irqs_disabled();
775 	rcu_nmi_exit();
776 }
777 
778 /**
779  * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
780  *			  towards in-kernel preemption
781  *
782  * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
783  * from RCU point of view. Invoked from return from interrupt before kernel
784  * preemption.
785  */
786 void rcu_irq_exit_preempt(void)
787 {
788 	lockdep_assert_irqs_disabled();
789 	rcu_nmi_exit();
790 
791 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
792 			 "RCU dynticks_nesting counter underflow/zero!");
793 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
794 			 DYNTICK_IRQ_NONIDLE,
795 			 "Bad RCU  dynticks_nmi_nesting counter\n");
796 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
797 			 "RCU in extended quiescent state!");
798 }
799 
800 #ifdef CONFIG_PROVE_RCU
801 /**
802  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
803  */
804 void rcu_irq_exit_check_preempt(void)
805 {
806 	lockdep_assert_irqs_disabled();
807 
808 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
809 			 "RCU dynticks_nesting counter underflow/zero!");
810 	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
811 			 DYNTICK_IRQ_NONIDLE,
812 			 "Bad RCU  dynticks_nmi_nesting counter\n");
813 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
814 			 "RCU in extended quiescent state!");
815 }
816 #endif /* #ifdef CONFIG_PROVE_RCU */
817 
818 /*
819  * Wrapper for rcu_irq_exit() where interrupts are enabled.
820  *
821  * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
822  * with CONFIG_RCU_EQS_DEBUG=y.
823  */
824 void rcu_irq_exit_irqson(void)
825 {
826 	unsigned long flags;
827 
828 	local_irq_save(flags);
829 	rcu_irq_exit();
830 	local_irq_restore(flags);
831 }
832 
833 /*
834  * Exit an RCU extended quiescent state, which can be either the
835  * idle loop or adaptive-tickless usermode execution.
836  *
837  * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
838  * allow for the possibility of usermode upcalls messing up our count of
839  * interrupt nesting level during the busy period that is just now starting.
840  */
841 static void noinstr rcu_eqs_exit(bool user)
842 {
843 	struct rcu_data *rdp;
844 	long oldval;
845 
846 	lockdep_assert_irqs_disabled();
847 	rdp = this_cpu_ptr(&rcu_data);
848 	oldval = rdp->dynticks_nesting;
849 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
850 	if (oldval) {
851 		// RCU was already watching, so just do accounting and leave.
852 		rdp->dynticks_nesting++;
853 		return;
854 	}
855 	rcu_dynticks_task_exit();
856 	// RCU is not watching here ...
857 	rcu_dynticks_eqs_exit();
858 	// ... but is watching here.
859 	instrumentation_begin();
860 
861 	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
862 	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
863 
864 	rcu_cleanup_after_idle();
865 	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
866 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
867 	WRITE_ONCE(rdp->dynticks_nesting, 1);
868 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
869 	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
870 	instrumentation_end();
871 }
872 
873 /**
874  * rcu_idle_exit - inform RCU that current CPU is leaving idle
875  *
876  * Exit idle mode, in other words, -enter- the mode in which RCU
877  * read-side critical sections can occur.
878  *
879  * If you add or remove a call to rcu_idle_exit(), be sure to test with
880  * CONFIG_RCU_EQS_DEBUG=y.
881  */
882 void rcu_idle_exit(void)
883 {
884 	unsigned long flags;
885 
886 	local_irq_save(flags);
887 	rcu_eqs_exit(false);
888 	local_irq_restore(flags);
889 }
890 EXPORT_SYMBOL_GPL(rcu_idle_exit);
891 
892 #ifdef CONFIG_NO_HZ_FULL
893 /**
894  * rcu_user_exit - inform RCU that we are exiting userspace.
895  *
896  * Exit RCU idle mode while entering the kernel because it can
897  * run an RCU read-side critical section at any time.
898  *
899  * If you add or remove a call to rcu_user_exit(), be sure to test with
900  * CONFIG_RCU_EQS_DEBUG=y.
901  */
902 void noinstr rcu_user_exit(void)
903 {
904 	rcu_eqs_exit(1);
905 }
906 
907 /**
908  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
909  *
910  * The scheduler tick is not normally enabled when CPUs enter the kernel
911  * from nohz_full userspace execution.  After all, nohz_full userspace
912  * execution is an RCU quiescent state and the time executing in the kernel
913  * is quite short.  Except of course when it isn't.  And it is not hard to
914  * cause a large system to spend tens of seconds or even minutes looping
915  * in the kernel, which can cause a number of problems, including RCU CPU
916  * stall warnings.
917  *
918  * Therefore, if a nohz_full CPU fails to report a quiescent state
919  * in a timely manner, the RCU grace-period kthread sets that CPU's
920  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
921  * exception will invoke this function, which will turn on the scheduler
922  * tick, which will enable RCU to detect that CPU's quiescent states,
923  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
924  * The tick will be disabled once a quiescent state is reported for
925  * this CPU.
926  *
927  * Of course, in carefully tuned systems, there might never be an
928  * interrupt or exception.  In that case, the RCU grace-period kthread
929  * will eventually cause one to happen.  However, in less carefully
930  * controlled environments, this function allows RCU to get what it
931  * needs without creating otherwise useless interruptions.
932  */
933 void __rcu_irq_enter_check_tick(void)
934 {
935 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
936 
937 	// If we're here from NMI there's nothing to do.
938 	if (in_nmi())
939 		return;
940 
941 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
942 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
943 
944 	if (!tick_nohz_full_cpu(rdp->cpu) ||
945 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
946 	    READ_ONCE(rdp->rcu_forced_tick)) {
947 		// RCU doesn't need nohz_full help from this CPU, or it is
948 		// already getting that help.
949 		return;
950 	}
951 
952 	// We get here only when not in an extended quiescent state and
953 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
954 	// already watching and (2) The fact that we are in an interrupt
955 	// handler and that the rcu_node lock is an irq-disabled lock
956 	// prevents self-deadlock.  So we can safely recheck under the lock.
957 	// Note that the nohz_full state currently cannot change.
958 	raw_spin_lock_rcu_node(rdp->mynode);
959 	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
960 		// A nohz_full CPU is in the kernel and RCU needs a
961 		// quiescent state.  Turn on the tick!
962 		WRITE_ONCE(rdp->rcu_forced_tick, true);
963 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
964 	}
965 	raw_spin_unlock_rcu_node(rdp->mynode);
966 }
967 #endif /* CONFIG_NO_HZ_FULL */
968 
969 /**
970  * rcu_nmi_enter - inform RCU of entry to NMI context
971  *
972  * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
973  * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
974  * that the CPU is active.  This implementation permits nested NMIs, as
975  * long as the nesting level does not overflow an int.  (You will probably
976  * run out of stack space first.)
977  *
978  * If you add or remove a call to rcu_nmi_enter(), be sure to test
979  * with CONFIG_RCU_EQS_DEBUG=y.
980  */
981 noinstr void rcu_nmi_enter(void)
982 {
983 	long incby = 2;
984 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
985 
986 	/* Complain about underflow. */
987 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
988 
989 	/*
990 	 * If idle from RCU viewpoint, atomically increment ->dynticks
991 	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
992 	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
993 	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
994 	 * to be in the outermost NMI handler that interrupted an RCU-idle
995 	 * period (observation due to Andy Lutomirski).
996 	 */
997 	if (rcu_dynticks_curr_cpu_in_eqs()) {
998 
999 		if (!in_nmi())
1000 			rcu_dynticks_task_exit();
1001 
1002 		// RCU is not watching here ...
1003 		rcu_dynticks_eqs_exit();
1004 		// ... but is watching here.
1005 
1006 		if (!in_nmi()) {
1007 			instrumentation_begin();
1008 			rcu_cleanup_after_idle();
1009 			instrumentation_end();
1010 		}
1011 
1012 		instrumentation_begin();
1013 		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1014 		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1015 		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
1016 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1017 
1018 		incby = 1;
1019 	} else if (!in_nmi()) {
1020 		instrumentation_begin();
1021 		rcu_irq_enter_check_tick();
1022 	} else  {
1023 		instrumentation_begin();
1024 	}
1025 
1026 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1027 			  rdp->dynticks_nmi_nesting,
1028 			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1029 	instrumentation_end();
1030 	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1031 		   rdp->dynticks_nmi_nesting + incby);
1032 	barrier();
1033 }
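/*
 * Example of the resulting ->dynticks_nmi_nesting values: starting from
 * idle (0), the first interrupt or NMI uses incby == 1 and leaves the
 * counter at 1; an NMI arriving on top of that handler uses incby == 2
 * and leaves it at 3.  rcu_nmi_exit() then steps back down 3 -> 1, and
 * the outermost exit crowbars 1 -> 0, returning the CPU to RCU-idle.
 */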
1034 
1035 /**
1036  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1037  *
1038  * Enter an interrupt handler, which might possibly result in exiting
1039  * idle mode, in other words, entering the mode in which read-side critical
1040  * sections can occur.  The caller must have disabled interrupts.
1041  *
1042  * Note that the Linux kernel is fully capable of entering an interrupt
1043  * handler that it never exits, for example when doing upcalls to user mode!
1044  * This code assumes that the idle loop never does upcalls to user mode.
1045  * If your architecture's idle loop does do upcalls to user mode (or does
1046  * anything else that results in unbalanced calls to the irq_enter() and
1047  * irq_exit() functions), RCU will give you what you deserve, good and hard.
1048  * But very infrequently and irreproducibly.
1049  *
1050  * Use things like work queues to work around this limitation.
1051  *
1052  * You have been warned.
1053  *
1054  * If you add or remove a call to rcu_irq_enter(), be sure to test with
1055  * CONFIG_RCU_EQS_DEBUG=y.
1056  */
1057 noinstr void rcu_irq_enter(void)
1058 {
1059 	lockdep_assert_irqs_disabled();
1060 	rcu_nmi_enter();
1061 }
1062 
1063 /*
1064  * Wrapper for rcu_irq_enter() where interrupts are enabled.
1065  *
1066  * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1067  * with CONFIG_RCU_EQS_DEBUG=y.
1068  */
1069 void rcu_irq_enter_irqson(void)
1070 {
1071 	unsigned long flags;
1072 
1073 	local_irq_save(flags);
1074 	rcu_irq_enter();
1075 	local_irq_restore(flags);
1076 }
1077 
1078 /*
1079  * If any sort of urgency was applied to the current CPU (for example,
1080  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1081  * to get to a quiescent state, disable it.
1082  */
1083 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1084 {
1085 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
1086 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
1087 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1088 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1089 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1090 		WRITE_ONCE(rdp->rcu_forced_tick, false);
1091 	}
1092 }
1093 
1094 /**
1095  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1096  *
1097  * Return true if RCU is watching the running CPU, which means that this
1098  * CPU can safely enter RCU read-side critical sections.  In other words,
1099  * if the current CPU is not in its idle loop or is in an interrupt or
1100  * NMI handler, return true.
1101  *
1102  * Make notrace because it can be called by the internal functions of
1103  * ftrace, and making this notrace removes unnecessary recursion calls.
1104  */
1105 notrace bool rcu_is_watching(void)
1106 {
1107 	bool ret;
1108 
1109 	preempt_disable_notrace();
1110 	ret = !rcu_dynticks_curr_cpu_in_eqs();
1111 	preempt_enable_notrace();
1112 	return ret;
1113 }
1114 EXPORT_SYMBOL_GPL(rcu_is_watching);
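/*
 * Typical usage sketch for code that might run from the idle loop or from
 * early entry/exit paths, where RCU may not be watching:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		// ... dereference RCU-protected data ...
 *		rcu_read_unlock();
 *	} else {
 *		// defer the work or skip the RCU-protected access
 *	}
 */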
1115 
1116 /*
1117  * If a holdout task is actually running, request an urgent quiescent
1118  * state from its CPU.  This is unsynchronized, so migrations can cause
1119  * the request to go to the wrong CPU.  Which is OK, all that will happen
1120  * is that the CPU's next context switch will be a bit slower and next
1121  * time around this task will generate another request.
1122  */
1123 void rcu_request_urgent_qs_task(struct task_struct *t)
1124 {
1125 	int cpu;
1126 
1127 	barrier();
1128 	cpu = task_cpu(t);
1129 	if (!task_curr(t))
1130 		return; /* This task is not running on that CPU. */
1131 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1132 }
1133 
1134 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1135 
1136 /*
1137  * Is the current CPU online as far as RCU is concerned?
1138  *
1139  * Disable preemption to avoid false positives that could otherwise
1140  * happen due to the current CPU number being sampled, this task being
1141  * preempted, its old CPU being taken offline, resuming on some other CPU,
1142  * then determining that its old CPU is now offline.
1143  *
1144  * Disable checking if in an NMI handler because we cannot safely
1145  * report errors from NMI handlers anyway.  In addition, it is OK to use
1146  * RCU on an offline processor during initial boot, hence the check for
1147  * rcu_scheduler_fully_active.
1148  */
1149 bool rcu_lockdep_current_cpu_online(void)
1150 {
1151 	struct rcu_data *rdp;
1152 	struct rcu_node *rnp;
1153 	bool ret = false;
1154 
1155 	if (in_nmi() || !rcu_scheduler_fully_active)
1156 		return true;
1157 	preempt_disable_notrace();
1158 	rdp = this_cpu_ptr(&rcu_data);
1159 	rnp = rdp->mynode;
1160 	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
1161 		ret = true;
1162 	preempt_enable_notrace();
1163 	return ret;
1164 }
1165 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1166 
1167 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1168 
1169 /*
1170  * We are reporting a quiescent state on behalf of some other CPU, so
1171  * it is our responsibility to check for and handle potential overflow
1172  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1173  * After all, the CPU might be in deep idle state, and thus executing no
1174  * code whatsoever.
1175  */
1176 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1177 {
1178 	raw_lockdep_assert_held_rcu_node(rnp);
1179 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1180 			 rnp->gp_seq))
1181 		WRITE_ONCE(rdp->gpwrap, true);
1182 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1183 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
1184 }
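/*
 * Numeric sketch of the checks above: ->gpwrap is set once rnp->gp_seq has
 * run more than ULONG_MAX / 4 ahead of the rdp->gp_seq value cached by this
 * CPU.  At that point the cached value is close enough to counter wrap that
 * comparisons against it could be misleading, so it is flagged for refresh,
 * and ->rcu_iw_gp_seq is pulled forward for the same reason.
 */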
1185 
1186 /*
1187  * Snapshot the specified CPU's dynticks counter so that we can later
1188  * credit them with an implicit quiescent state.  Return 1 if this CPU
1189  * is in dynticks idle mode, which is an extended quiescent state.
1190  */
1191 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1192 {
1193 	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1194 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1195 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1196 		rcu_gpnum_ovf(rdp->mynode, rdp);
1197 		return 1;
1198 	}
1199 	return 0;
1200 }
1201 
1202 /*
1203  * Return true if the specified CPU has passed through a quiescent
1204  * state by virtue of being in or having passed through a dynticks
1205  * idle state since the last call to dyntick_save_progress_counter()
1206  * for this same CPU, or by virtue of having been offline.
1207  */
1208 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1209 {
1210 	unsigned long jtsq;
1211 	bool *rnhqp;
1212 	bool *ruqp;
1213 	struct rcu_node *rnp = rdp->mynode;
1214 
1215 	/*
1216 	 * If the CPU passed through or entered a dynticks idle phase with
1217 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
1218 	 * already acknowledged the request to pass through a quiescent
1219 	 * state.  Either way, that CPU cannot possibly be in an RCU
1220 	 * read-side critical section that started before the beginning
1221 	 * of the current RCU grace period.
1222 	 */
1223 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1224 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1225 		rcu_gpnum_ovf(rnp, rdp);
1226 		return 1;
1227 	}
1228 
1229 	/*
1230 	 * Complain if a CPU that is considered to be offline from RCU's
1231 	 * perspective has not yet reported a quiescent state.  After all,
1232 	 * the offline CPU should have reported a quiescent state during
1233 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
1234 	 * if it ran concurrently with either the CPU going offline or the
1235 	 * last task on a leaf rcu_node structure exiting its RCU read-side
1236 	 * critical section while all CPUs corresponding to that structure
1237 	 * are offline.  This added warning detects bugs in any of these
1238 	 * code paths.
1239 	 *
1240 	 * The rcu_node structure's ->lock is held here, which excludes
1241 	 * the relevant portions of the CPU-hotplug code, the grace-period
1242 	 * initialization code, and the rcu_read_unlock() code paths.
1243 	 *
1244 	 * For more detail, please refer to the "Hotplug CPU" section
1245 	 * of RCU's Requirements documentation.
1246 	 */
1247 	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1248 		bool onl;
1249 		struct rcu_node *rnp1;
1250 
1251 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1252 			__func__, rnp->grplo, rnp->grphi, rnp->level,
1253 			(long)rnp->gp_seq, (long)rnp->completedqs);
1254 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1255 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1256 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1257 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1258 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1259 			__func__, rdp->cpu, ".o"[onl],
1260 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1261 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1262 		return 1; /* Break things loose after complaining. */
1263 	}
1264 
1265 	/*
1266 	 * A CPU running for an extended time within the kernel can
1267 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1268 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1269 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
1270 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1271 	 * variable are safe because the assignments are repeated if this
1272 	 * CPU failed to pass through a quiescent state.  This code
1273 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
1274 	 * is set way high.
1275 	 */
1276 	jtsq = READ_ONCE(jiffies_to_sched_qs);
1277 	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1278 	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1279 	if (!READ_ONCE(*rnhqp) &&
1280 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1281 	     time_after(jiffies, rcu_state.jiffies_resched) ||
1282 	     rcu_state.cbovld)) {
1283 		WRITE_ONCE(*rnhqp, true);
1284 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1285 		smp_store_release(ruqp, true);
1286 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1287 		WRITE_ONCE(*ruqp, true);
1288 	}
1289 
1290 	/*
1291 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1292 	 * The above code handles this, but only for straight cond_resched().
1293 	 * And some in-kernel loops check need_resched() before calling
1294 	 * cond_resched(), which defeats the above code for CPUs that are
1295 	 * running in-kernel with scheduling-clock interrupts disabled.
1296 	 * So hit them over the head with the resched_cpu() hammer!
1297 	 */
1298 	if (tick_nohz_full_cpu(rdp->cpu) &&
1299 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1300 	     rcu_state.cbovld)) {
1301 		WRITE_ONCE(*ruqp, true);
1302 		resched_cpu(rdp->cpu);
1303 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1304 	}
1305 
1306 	/*
1307 	 * If more than halfway to RCU CPU stall-warning time, invoke
1308 	 * resched_cpu() more frequently to try to loosen things up a bit.
1309 	 * Also check to see if the CPU is getting hammered with interrupts,
1310 	 * but only once per grace period, just to keep the IPIs down to
1311 	 * a dull roar.
1312 	 */
1313 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
1314 		if (time_after(jiffies,
1315 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1316 			resched_cpu(rdp->cpu);
1317 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1318 		}
1319 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1320 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1321 		    (rnp->ffmask & rdp->grpmask)) {
1322 			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1323 			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
1324 			rdp->rcu_iw_pending = true;
1325 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
1326 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1327 		}
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1334 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1335 			      unsigned long gp_seq_req, const char *s)
1336 {
1337 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1338 				      gp_seq_req, rnp->level,
1339 				      rnp->grplo, rnp->grphi, s);
1340 }
1341 
1342 /*
1343  * rcu_start_this_gp - Request the start of a particular grace period
1344  * @rnp_start: The leaf node of the CPU from which to start.
1345  * @rdp: The rcu_data corresponding to the CPU from which to start.
1346  * @gp_seq_req: The gp_seq of the grace period to start.
1347  *
1348  * Start the specified grace period, as needed to handle newly arrived
1349  * callbacks.  The required future grace periods are recorded in each
1350  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
1351  * is reason to awaken the grace-period kthread.
1352  *
1353  * The caller must hold the specified rcu_node structure's ->lock, which
1354  * is why the caller is responsible for waking the grace-period kthread.
1355  *
1356  * Returns true if the GP thread needs to be awakened else false.
1357  */
1358 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1359 			      unsigned long gp_seq_req)
1360 {
1361 	bool ret = false;
1362 	struct rcu_node *rnp;
1363 
1364 	/*
1365 	 * Use funnel locking to either acquire the root rcu_node
1366 	 * structure's lock or bail out if the need for this grace period
1367 	 * has already been recorded -- or if that grace period has in
1368 	 * fact already started.  If there is already a grace period in
1369 	 * progress in a non-leaf node, no recording is needed because the
1370 	 * end of the grace period will scan the leaf rcu_node structures.
1371 	 * Note that rnp_start->lock must not be released.
1372 	 */
1373 	raw_lockdep_assert_held_rcu_node(rnp_start);
1374 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1375 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
1376 		if (rnp != rnp_start)
1377 			raw_spin_lock_rcu_node(rnp);
1378 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1379 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1380 		    (rnp != rnp_start &&
1381 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1382 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1383 					  TPS("Prestarted"));
1384 			goto unlock_out;
1385 		}
1386 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1387 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1388 			/*
1389 			 * We just marked the leaf or internal node, and a
1390 			 * grace period is in progress, which means that
1391 			 * rcu_gp_cleanup() will see the marking.  Bail to
1392 			 * reduce contention.
1393 			 */
1394 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1395 					  TPS("Startedleaf"));
1396 			goto unlock_out;
1397 		}
1398 		if (rnp != rnp_start && rnp->parent != NULL)
1399 			raw_spin_unlock_rcu_node(rnp);
1400 		if (!rnp->parent)
1401 			break;  /* At root, and perhaps also leaf. */
1402 	}
1403 
1404 	/* If GP already in progress, just leave, otherwise start one. */
1405 	if (rcu_gp_in_progress()) {
1406 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1407 		goto unlock_out;
1408 	}
1409 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1410 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1411 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1412 	if (!READ_ONCE(rcu_state.gp_kthread)) {
1413 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1414 		goto unlock_out;
1415 	}
1416 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1417 	ret = true;  /* Caller must wake GP kthread. */
1418 unlock_out:
1419 	/* Push furthest requested GP to leaf node and rcu_data structure. */
1420 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1421 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1422 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1423 	}
1424 	if (rnp != rnp_start)
1425 		raw_spin_unlock_rcu_node(rnp);
1426 	return ret;
1427 }
1428 
1429 /*
1430  * Clean up any old requests for the just-ended grace period.  Also return
1431  * whether any additional grace periods have been requested.
1432  */
1433 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1434 {
1435 	bool needmore;
1436 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1437 
1438 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1439 	if (!needmore)
1440 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1441 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1442 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1443 	return needmore;
1444 }
1445 
1446 /*
1447  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1448  * interrupt or softirq handler, in which case we just might immediately
1449  * sleep upon return, resulting in a grace-period hang), and don't bother
1450  * awakening when there is nothing for the grace-period kthread to do
1451  * (as in several CPUs raced to awaken, we lost), and finally don't try
1452  * to awaken a kthread that has not yet been created.  If all those checks
1453  * are passed, track some debug information and awaken.
1454  *
1455  * So why do the self-wakeup when in an interrupt or softirq handler
1456  * in the grace-period kthread's context?  Because the kthread might have
1457  * been interrupted just as it was going to sleep, and just after the final
1458  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1459  * is required, and is therefore supplied.
1460  */
1461 static void rcu_gp_kthread_wake(void)
1462 {
1463 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1464 
1465 	if ((current == t && !in_irq() && !in_serving_softirq()) ||
1466 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1467 		return;
1468 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1469 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1470 	swake_up_one(&rcu_state.gp_wq);
1471 }
1472 
1473 /*
1474  * If there is room, assign a ->gp_seq number to any callbacks on this
1475  * CPU that have not already been assigned.  Also accelerate any callbacks
1476  * that were previously assigned a ->gp_seq number that has since proven
1477  * to be too conservative, which can happen if callbacks get assigned a
1478  * ->gp_seq number while RCU is idle, but with reference to a non-root
1479  * rcu_node structure.  This function is idempotent, so it does not hurt
1480  * to call it repeatedly.  Returns an flag saying that we should awaken
1481  * to call it repeatedly.  Returns a flag indicating whether we should
1482  * awaken the RCU grace-period kthread.
1483  * The caller must hold rnp->lock with interrupts disabled.
1484  */
1485 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1486 {
1487 	unsigned long gp_seq_req;
1488 	bool ret = false;
1489 
1490 	rcu_lockdep_assert_cblist_protected(rdp);
1491 	raw_lockdep_assert_held_rcu_node(rnp);
1492 
1493 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1494 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1495 		return false;
1496 
1497 	/*
1498 	 * Callbacks are often registered with incomplete grace-period
1499 	 * information.  Something about the fact that getting exact
1500 	 * information requires acquiring a global lock...  RCU therefore
1501 	 * makes a conservative estimate of the grace period number at which
1502 	 * a given callback will become ready to invoke.	The following
1503 	 * code checks this estimate and improves it when possible, thus
1504 	 * accelerating callback invocation to an earlier grace-period
1505 	 * number.
1506 	 */
1507 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1508 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1509 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1510 
1511 	/* Trace depending on how much we were able to accelerate. */
1512 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1513 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1514 	else
1515 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1516 
1517 	return ret;
1518 }
1519 
1520 /*
1521  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1522  * rcu_node structure's ->lock be held.  It consults the cached value
1523  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1524  * that a new grace-period request should be made, invokes rcu_accelerate_cbs()
1525  * while holding the leaf rcu_node structure's ->lock.
1526  */
1527 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1528 					struct rcu_data *rdp)
1529 {
1530 	unsigned long c;
1531 	bool needwake;
1532 
1533 	rcu_lockdep_assert_cblist_protected(rdp);
1534 	c = rcu_seq_snap(&rcu_state.gp_seq);
1535 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1536 		/* Old request still live, so mark recent callbacks. */
1537 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1538 		return;
1539 	}
1540 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1541 	needwake = rcu_accelerate_cbs(rnp, rdp);
1542 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1543 	if (needwake)
1544 		rcu_gp_kthread_wake();
1545 }
1546 
1547 /*
1548  * Move any callbacks whose grace period has completed to the
1549  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1550  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1551  * sublist.  This function is idempotent, so it does not hurt to
1552  * invoke it repeatedly.  As long as it is not invoked -too- often...
1553  * Returns true if the RCU grace-period kthread needs to be awakened.
1554  *
1555  * The caller must hold rnp->lock with interrupts disabled.
1556  */
1557 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1558 {
1559 	rcu_lockdep_assert_cblist_protected(rdp);
1560 	raw_lockdep_assert_held_rcu_node(rnp);
1561 
1562 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1563 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1564 		return false;
1565 
1566 	/*
1567 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1568 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1569 	 */
1570 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1571 
1572 	/* Classify any remaining callbacks. */
1573 	return rcu_accelerate_cbs(rnp, rdp);
1574 }
1575 
1576 /*
1577  * Move and classify callbacks, but only if doing so won't require
1578  * that the RCU grace-period kthread be awakened.
1579  */
1580 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1581 						  struct rcu_data *rdp)
1582 {
1583 	rcu_lockdep_assert_cblist_protected(rdp);
1584 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1585 		return;
1586 	// The grace period cannot end while we hold the rcu_node lock.
1587 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1588 		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1589 	raw_spin_unlock_rcu_node(rnp);
1590 }
1591 
1592 /*
1593  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1594  * quiescent state.  This is intended to be invoked when the CPU notices
1595  * a new grace period.
1596  */
1597 static void rcu_strict_gp_check_qs(void)
1598 {
1599 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1600 		rcu_read_lock();
1601 		rcu_read_unlock();
1602 	}
1603 }
1604 
1605 /*
1606  * Update CPU-local rcu_data state to record the beginnings and ends of
1607  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1608  * structure corresponding to the current CPU, and must have irqs disabled.
1609  * Returns true if the grace-period kthread needs to be awakened.
1610  */
1611 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1612 {
1613 	bool ret = false;
1614 	bool need_qs;
1615 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1616 			       rcu_segcblist_is_offloaded(&rdp->cblist);
1617 
1618 	raw_lockdep_assert_held_rcu_node(rnp);
1619 
1620 	if (rdp->gp_seq == rnp->gp_seq)
1621 		return false; /* Nothing to do. */
1622 
1623 	/* Handle the ends of any preceding grace periods first. */
1624 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1625 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1626 		if (!offloaded)
1627 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1628 		rdp->core_needs_qs = false;
1629 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1630 	} else {
1631 		if (!offloaded)
1632 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1633 		if (rdp->core_needs_qs)
1634 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1635 	}
1636 
1637 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1638 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1639 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1640 		/*
1641 		 * If the current grace period is waiting for this CPU,
1642 		 * set up to detect a quiescent state, otherwise don't
1643 		 * go looking for one.
1644 		 */
1645 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1646 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1647 		rdp->cpu_no_qs.b.norm = need_qs;
1648 		rdp->core_needs_qs = need_qs;
1649 		zero_cpu_stall_ticks(rdp);
1650 	}
1651 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1652 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1653 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1654 	WRITE_ONCE(rdp->gpwrap, false);
1655 	rcu_gpnum_ovf(rnp, rdp);
1656 	return ret;
1657 }
1658 
1659 static void note_gp_changes(struct rcu_data *rdp)
1660 {
1661 	unsigned long flags;
1662 	bool needwake;
1663 	struct rcu_node *rnp;
1664 
1665 	local_irq_save(flags);
1666 	rnp = rdp->mynode;
1667 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1668 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1669 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1670 		local_irq_restore(flags);
1671 		return;
1672 	}
1673 	needwake = __note_gp_changes(rnp, rdp);
1674 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1675 	rcu_strict_gp_check_qs();
1676 	if (needwake)
1677 		rcu_gp_kthread_wake();
1678 }
1679 
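/*
 * If non-zero, delay the current grace-period phase by the specified
 * number of jiffies, but only once every so many grace periods (as
 * determined by the size of the rcu_node tree), in order to exercise
 * RCU's tolerance for slow grace-period processing.  Invoked below with
 * the gp_preinit_delay, gp_init_delay, and gp_cleanup_delay values.
 */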
1680 static void rcu_gp_slow(int delay)
1681 {
1682 	if (delay > 0 &&
1683 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1684 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1685 		schedule_timeout_idle(delay);
1686 }
1687 
1688 static unsigned long sleep_duration;
1689 
1690 /* Allow rcutorture to stall the grace-period kthread. */
1691 void rcu_gp_set_torture_wait(int duration)
1692 {
1693 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1694 		WRITE_ONCE(sleep_duration, duration);
1695 }
1696 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1697 
1698 /* Actually implement the aforementioned wait. */
1699 static void rcu_gp_torture_wait(void)
1700 {
1701 	unsigned long duration;
1702 
1703 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1704 		return;
1705 	duration = xchg(&sleep_duration, 0UL);
1706 	if (duration > 0) {
1707 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1708 		schedule_timeout_idle(duration);
1709 		pr_alert("%s: Wait complete\n", __func__);
1710 	}
1711 }
1712 
1713 /*
1714  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1715  * processing.
1716  */
1717 static void rcu_strict_gp_boundary(void *unused)
1718 {
1719 	invoke_rcu_core();
1720 }
1721 
1722 /*
1723  * Initialize a new grace period.  Return false if no grace period required.
1724  */
1725 static bool rcu_gp_init(void)
1726 {
1727 	unsigned long flags;
1728 	unsigned long oldmask;
1729 	unsigned long mask;
1730 	struct rcu_data *rdp;
1731 	struct rcu_node *rnp = rcu_get_root();
1732 
1733 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1734 	raw_spin_lock_irq_rcu_node(rnp);
1735 	if (!READ_ONCE(rcu_state.gp_flags)) {
1736 		/* Spurious wakeup, tell caller to go back to sleep.  */
1737 		raw_spin_unlock_irq_rcu_node(rnp);
1738 		return false;
1739 	}
1740 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1741 
1742 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1743 		/*
1744 		 * Grace period already in progress, don't start another.
1745 		 * Not supposed to be able to happen.
1746 		 */
1747 		raw_spin_unlock_irq_rcu_node(rnp);
1748 		return false;
1749 	}
1750 
1751 	/* Advance to a new grace period and initialize state. */
1752 	record_gp_stall_check_time();
1753 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1754 	rcu_seq_start(&rcu_state.gp_seq);
1755 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1756 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1757 	raw_spin_unlock_irq_rcu_node(rnp);
1758 
1759 	/*
1760 	 * Apply per-leaf buffered online and offline operations to
1761 	 * the rcu_node tree. Note that this new grace period need not
1762 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1763 	 * offlining path, when combined with checks in this function,
1764 	 * will handle CPUs that are currently going offline or that will
1765 	 * go offline later.  Please also refer to "Hotplug CPU" section
1766 	 * of RCU's Requirements documentation.
1767 	 */
1768 	rcu_state.gp_state = RCU_GP_ONOFF;
1769 	rcu_for_each_leaf_node(rnp) {
1770 		raw_spin_lock(&rcu_state.ofl_lock);
1771 		raw_spin_lock_irq_rcu_node(rnp);
1772 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1773 		    !rnp->wait_blkd_tasks) {
1774 			/* Nothing to do on this leaf rcu_node structure. */
1775 			raw_spin_unlock_irq_rcu_node(rnp);
1776 			raw_spin_unlock(&rcu_state.ofl_lock);
1777 			continue;
1778 		}
1779 
1780 		/* Record old state, apply changes to ->qsmaskinit field. */
1781 		oldmask = rnp->qsmaskinit;
1782 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1783 
1784 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1785 		if (!oldmask != !rnp->qsmaskinit) {
1786 			if (!oldmask) { /* First online CPU for rcu_node. */
1787 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1788 					rcu_init_new_rnp(rnp);
1789 			} else if (rcu_preempt_has_tasks(rnp)) {
1790 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1791 			} else { /* Last offline CPU and can propagate. */
1792 				rcu_cleanup_dead_rnp(rnp);
1793 			}
1794 		}
1795 
1796 		/*
1797 		 * If all waited-on tasks from prior grace period are
1798 		 * done, and if all this rcu_node structure's CPUs are
1799 		 * still offline, propagate up the rcu_node tree and
1800 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1801 		 * rcu_node structure's CPUs has since come back online,
1802 		 * simply clear ->wait_blkd_tasks.
1803 		 */
1804 		if (rnp->wait_blkd_tasks &&
1805 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1806 			rnp->wait_blkd_tasks = false;
1807 			if (!rnp->qsmaskinit)
1808 				rcu_cleanup_dead_rnp(rnp);
1809 		}
1810 
1811 		raw_spin_unlock_irq_rcu_node(rnp);
1812 		raw_spin_unlock(&rcu_state.ofl_lock);
1813 	}
1814 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1815 
1816 	/*
1817 	 * Set the quiescent-state-needed bits in all the rcu_node
1818 	 * structures for all currently online CPUs in breadth-first
1819 	 * order, starting from the root rcu_node structure, relying on the
1820 	 * layout of the tree within the rcu_state.node[] array.  Note that
1821 	 * other CPUs will access only the leaves of the hierarchy, thus
1822 	 * seeing that no grace period is in progress, at least until the
1823 	 * corresponding leaf node has been initialized.
1824 	 *
1825 	 * The grace period cannot complete until the initialization
1826 	 * process finishes, because this kthread handles both.
1827 	 */
1828 	rcu_state.gp_state = RCU_GP_INIT;
1829 	rcu_for_each_node_breadth_first(rnp) {
1830 		rcu_gp_slow(gp_init_delay);
1831 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1832 		rdp = this_cpu_ptr(&rcu_data);
1833 		rcu_preempt_check_blocked_tasks(rnp);
1834 		rnp->qsmask = rnp->qsmaskinit;
1835 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1836 		if (rnp == rdp->mynode)
1837 			(void)__note_gp_changes(rnp, rdp);
1838 		rcu_preempt_boost_start_gp(rnp);
1839 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1840 					    rnp->level, rnp->grplo,
1841 					    rnp->grphi, rnp->qsmask);
1842 		/* Quiescent states for tasks on any now-offline CPUs. */
1843 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1844 		rnp->rcu_gp_init_mask = mask;
1845 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1846 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1847 		else
1848 			raw_spin_unlock_irq_rcu_node(rnp);
1849 		cond_resched_tasks_rcu_qs();
1850 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1851 	}
1852 
1853 	// If strict, make all CPUs aware of new grace period.
1854 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1855 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1856 
1857 	return true;
1858 }
1859 
1860 /*
1861  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1862  * time.
1863  */
1864 static bool rcu_gp_fqs_check_wake(int *gfp)
1865 {
1866 	struct rcu_node *rnp = rcu_get_root();
1867 
1868 	// If under overload conditions, force an immediate FQS scan.
1869 	if (*gfp & RCU_GP_FLAG_OVLD)
1870 		return true;
1871 
1872 	// Someone like call_rcu() requested a force-quiescent-state scan.
1873 	*gfp = READ_ONCE(rcu_state.gp_flags);
1874 	if (*gfp & RCU_GP_FLAG_FQS)
1875 		return true;
1876 
1877 	// The current grace period has completed.
1878 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1879 		return true;
1880 
1881 	return false;
1882 }
1883 
1884 /*
1885  * Do one round of quiescent-state forcing.
1886  */
1887 static void rcu_gp_fqs(bool first_time)
1888 {
1889 	struct rcu_node *rnp = rcu_get_root();
1890 
1891 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1892 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1893 	if (first_time) {
1894 		/* Collect dyntick-idle snapshots. */
1895 		force_qs_rnp(dyntick_save_progress_counter);
1896 	} else {
1897 		/* Handle dyntick-idle and offline CPUs. */
1898 		force_qs_rnp(rcu_implicit_dynticks_qs);
1899 	}
1900 	/* Clear flag to prevent immediate re-entry. */
1901 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1902 		raw_spin_lock_irq_rcu_node(rnp);
1903 		WRITE_ONCE(rcu_state.gp_flags,
1904 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1905 		raw_spin_unlock_irq_rcu_node(rnp);
1906 	}
1907 }
1908 
1909 /*
1910  * Loop doing repeated quiescent-state forcing until the grace period ends.
1911  */
1912 static void rcu_gp_fqs_loop(void)
1913 {
1914 	bool first_gp_fqs;
1915 	int gf = 0;
1916 	unsigned long j;
1917 	int ret;
1918 	struct rcu_node *rnp = rcu_get_root();
1919 
1920 	first_gp_fqs = true;
1921 	j = READ_ONCE(jiffies_till_first_fqs);
1922 	if (rcu_state.cbovld)
1923 		gf = RCU_GP_FLAG_OVLD;
1924 	ret = 0;
1925 	for (;;) {
1926 		if (!ret) {
1927 			rcu_state.jiffies_force_qs = jiffies + j;
1928 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1929 				   jiffies + (j ? 3 * j : 2));
1930 		}
1931 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1932 				       TPS("fqswait"));
1933 		rcu_state.gp_state = RCU_GP_WAIT_FQS;
1934 		ret = swait_event_idle_timeout_exclusive(
1935 				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1936 		rcu_gp_torture_wait();
1937 		rcu_state.gp_state = RCU_GP_DOING_FQS;
1938 		/* Locking provides needed memory barriers. */
1939 		/* If grace period done, leave loop. */
1940 		if (!READ_ONCE(rnp->qsmask) &&
1941 		    !rcu_preempt_blocked_readers_cgp(rnp))
1942 			break;
1943 		/* If time for quiescent-state forcing, do it. */
1944 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1945 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1946 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1947 					       TPS("fqsstart"));
1948 			rcu_gp_fqs(first_gp_fqs);
1949 			gf = 0;
1950 			if (first_gp_fqs) {
1951 				first_gp_fqs = false;
1952 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1953 			}
1954 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1955 					       TPS("fqsend"));
1956 			cond_resched_tasks_rcu_qs();
1957 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1958 			ret = 0; /* Force full wait till next FQS. */
1959 			j = READ_ONCE(jiffies_till_next_fqs);
1960 		} else {
1961 			/* Deal with stray signal. */
1962 			cond_resched_tasks_rcu_qs();
1963 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1964 			WARN_ON(signal_pending(current));
1965 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1966 					       TPS("fqswaitsig"));
1967 			ret = 1; /* Keep old FQS timing. */
1968 			j = jiffies;
1969 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1970 				j = 1;
1971 			else
1972 				j = rcu_state.jiffies_force_qs - j;
1973 			gf = 0;
1974 		}
1975 	}
1976 }
1977 
1978 /*
1979  * Clean up after the old grace period.
1980  */
1981 static void rcu_gp_cleanup(void)
1982 {
1983 	int cpu;
1984 	bool needgp = false;
1985 	unsigned long gp_duration;
1986 	unsigned long new_gp_seq;
1987 	bool offloaded;
1988 	struct rcu_data *rdp;
1989 	struct rcu_node *rnp = rcu_get_root();
1990 	struct swait_queue_head *sq;
1991 
1992 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1993 	raw_spin_lock_irq_rcu_node(rnp);
1994 	rcu_state.gp_end = jiffies;
1995 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1996 	if (gp_duration > rcu_state.gp_max)
1997 		rcu_state.gp_max = gp_duration;
1998 
1999 	/*
2000 	 * We know the grace period is complete, but to everyone else
2001 	 * it appears to still be ongoing.  But it is also the case
2002 	 * that to everyone else it looks like there is nothing that
2003 	 * they can do to advance the grace period.  It is therefore
2004 	 * safe for us to drop the lock in order to mark the grace
2005 	 * period as completed in all of the rcu_node structures.
2006 	 */
2007 	raw_spin_unlock_irq_rcu_node(rnp);
2008 
2009 	/*
2010 	 * Propagate new ->gp_seq value to rcu_node structures so that
2011 	 * other CPUs don't have to wait until the start of the next grace
2012 	 * period to process their callbacks.  This also avoids some nasty
2013 	 * RCU grace-period initialization races by forcing the end of
2014 	 * the current grace period to be completely recorded in all of
2015 	 * the rcu_node structures before the beginning of the next grace
2016 	 * period is recorded in any of the rcu_node structures.
2017 	 */
2018 	new_gp_seq = rcu_state.gp_seq;
2019 	rcu_seq_end(&new_gp_seq);
2020 	rcu_for_each_node_breadth_first(rnp) {
2021 		raw_spin_lock_irq_rcu_node(rnp);
2022 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2023 			dump_blkd_tasks(rnp, 10);
2024 		WARN_ON_ONCE(rnp->qsmask);
2025 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2026 		rdp = this_cpu_ptr(&rcu_data);
2027 		if (rnp == rdp->mynode)
2028 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2029 		/* smp_mb() provided by prior unlock-lock pair. */
2030 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2031 		// Reset overload indication for CPUs no longer overloaded
2032 		if (rcu_is_leaf_node(rnp))
2033 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2034 				rdp = per_cpu_ptr(&rcu_data, cpu);
2035 				check_cb_ovld_locked(rdp, rnp);
2036 			}
2037 		sq = rcu_nocb_gp_get(rnp);
2038 		raw_spin_unlock_irq_rcu_node(rnp);
2039 		rcu_nocb_gp_cleanup(sq);
2040 		cond_resched_tasks_rcu_qs();
2041 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2042 		rcu_gp_slow(gp_cleanup_delay);
2043 	}
2044 	rnp = rcu_get_root();
2045 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2046 
2047 	/* Declare grace period done, trace first to use old GP number. */
2048 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2049 	rcu_seq_end(&rcu_state.gp_seq);
2050 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2051 	rcu_state.gp_state = RCU_GP_IDLE;
2052 	/* Check for GP requests since above loop. */
2053 	rdp = this_cpu_ptr(&rcu_data);
2054 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2055 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2056 				  TPS("CleanupMore"));
2057 		needgp = true;
2058 	}
2059 	/* Advance CBs to reduce false positives below. */
2060 	offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2061 		    rcu_segcblist_is_offloaded(&rdp->cblist);
2062 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2063 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2064 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2065 		trace_rcu_grace_period(rcu_state.name,
2066 				       rcu_state.gp_seq,
2067 				       TPS("newreq"));
2068 	} else {
2069 		WRITE_ONCE(rcu_state.gp_flags,
2070 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2071 	}
2072 	raw_spin_unlock_irq_rcu_node(rnp);
2073 
2074 	// If strict, make all CPUs aware of the end of the old grace period.
2075 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2076 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2077 }
2078 
2079 /*
2080  * Body of kthread that handles grace periods.
2081  */
2082 static int __noreturn rcu_gp_kthread(void *unused)
2083 {
2084 	rcu_bind_gp_kthread();
2085 	for (;;) {
2086 
2087 		/* Handle grace-period start. */
2088 		for (;;) {
2089 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2090 					       TPS("reqwait"));
2091 			rcu_state.gp_state = RCU_GP_WAIT_GPS;
2092 			swait_event_idle_exclusive(rcu_state.gp_wq,
2093 					 READ_ONCE(rcu_state.gp_flags) &
2094 					 RCU_GP_FLAG_INIT);
2095 			rcu_gp_torture_wait();
2096 			rcu_state.gp_state = RCU_GP_DONE_GPS;
2097 			/* Locking provides needed memory barrier. */
2098 			if (rcu_gp_init())
2099 				break;
2100 			cond_resched_tasks_rcu_qs();
2101 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2102 			WARN_ON(signal_pending(current));
2103 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2104 					       TPS("reqwaitsig"));
2105 		}
2106 
2107 		/* Handle quiescent-state forcing. */
2108 		rcu_gp_fqs_loop();
2109 
2110 		/* Handle grace-period end. */
2111 		rcu_state.gp_state = RCU_GP_CLEANUP;
2112 		rcu_gp_cleanup();
2113 		rcu_state.gp_state = RCU_GP_CLEANED;
2114 	}
2115 }
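
/*
 * Summarizing the state machine driven above: the kthread idles in
 * RCU_GP_WAIT_GPS until RCU_GP_FLAG_INIT is set, runs rcu_gp_init(),
 * then alternates between RCU_GP_WAIT_FQS and RCU_GP_DOING_FQS inside
 * rcu_gp_fqs_loop() until all needed quiescent states have been
 * reported, and finally runs rcu_gp_cleanup() (RCU_GP_CLEANUP and then
 * RCU_GP_CLEANED) before looping back to wait for the next request.
 */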
2116 
2117 /*
2118  * Report a full set of quiescent states to the rcu_state data structure.
2119  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2120  * another grace period is required.  Whether we wake the grace-period
2121  * kthread or it awakens itself for the next round of quiescent-state
2122  * forcing, that kthread will clean up after the just-completed grace
2123  * period.  Note that the caller must hold rnp->lock, which is released
2124  * before return.
2125  */
2126 static void rcu_report_qs_rsp(unsigned long flags)
2127 	__releases(rcu_get_root()->lock)
2128 {
2129 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2130 	WARN_ON_ONCE(!rcu_gp_in_progress());
2131 	WRITE_ONCE(rcu_state.gp_flags,
2132 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2133 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2134 	rcu_gp_kthread_wake();
2135 }
2136 
2137 /*
2138  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2139  * Allows quiescent states for a group of CPUs to be reported at one go
2140  * to the specified rcu_node structure, though all the CPUs in the group
2141  * must be represented by the same rcu_node structure (which need not be a
2142  * leaf rcu_node structure, though it often will be).  The gps parameter
2143  * is the grace-period snapshot, which means that the quiescent states
2144  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2145  * must be held upon entry, and it is released before return.
2146  *
2147  * As a special case, if mask is zero, the bit-already-cleared check is
2148  * disabled.  This allows propagating quiescent state due to resumed tasks
2149  * during grace-period initialization.
2150  */
2151 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2152 			      unsigned long gps, unsigned long flags)
2153 	__releases(rnp->lock)
2154 {
2155 	unsigned long oldmask = 0;
2156 	struct rcu_node *rnp_c;
2157 
2158 	raw_lockdep_assert_held_rcu_node(rnp);
2159 
2160 	/* Walk up the rcu_node hierarchy. */
2161 	for (;;) {
2162 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2163 
2164 			/*
2165 			 * Our bit has already been cleared, or the
2166 			 * relevant grace period is already over, so done.
2167 			 */
2168 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2169 			return;
2170 		}
2171 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2172 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2173 			     rcu_preempt_blocked_readers_cgp(rnp));
2174 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2175 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2176 						 mask, rnp->qsmask, rnp->level,
2177 						 rnp->grplo, rnp->grphi,
2178 						 !!rnp->gp_tasks);
2179 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2180 
2181 			/* Other bits still set at this level, so done. */
2182 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2183 			return;
2184 		}
2185 		rnp->completedqs = rnp->gp_seq;
2186 		mask = rnp->grpmask;
2187 		if (rnp->parent == NULL) {
2188 
2189 			/* No more levels.  Exit loop holding root lock. */
2190 
2191 			break;
2192 		}
2193 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2194 		rnp_c = rnp;
2195 		rnp = rnp->parent;
2196 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2197 		oldmask = READ_ONCE(rnp_c->qsmask);
2198 	}
2199 
2200 	/*
2201 	 * Get here if we are the last CPU to pass through a quiescent
2202 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2203 	 * to clean up and start the next grace period if one is needed.
2204 	 */
2205 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2206 }
2207 
2208 /*
2209  * Record a quiescent state for all tasks that were previously queued
2210  * on the specified rcu_node structure and that were blocking the current
2211  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2212  * irqs disabled, and this lock is released upon return, but irqs remain
2213  * disabled.
2214  */
2215 static void __maybe_unused
2216 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2217 	__releases(rnp->lock)
2218 {
2219 	unsigned long gps;
2220 	unsigned long mask;
2221 	struct rcu_node *rnp_p;
2222 
2223 	raw_lockdep_assert_held_rcu_node(rnp);
2224 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2225 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2226 	    rnp->qsmask != 0) {
2227 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2228 		return;  /* Still need more quiescent states! */
2229 	}
2230 
2231 	rnp->completedqs = rnp->gp_seq;
2232 	rnp_p = rnp->parent;
2233 	if (rnp_p == NULL) {
2234 		/*
2235 		 * Only one rcu_node structure in the tree, so don't
2236 		 * try to report up to its nonexistent parent!
2237 		 */
2238 		rcu_report_qs_rsp(flags);
2239 		return;
2240 	}
2241 
2242 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2243 	gps = rnp->gp_seq;
2244 	mask = rnp->grpmask;
2245 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2246 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2247 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2248 }
2249 
2250 /*
2251  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2252  * structure.  This must be called from the specified CPU.
2253  */
2254 static void
2255 rcu_report_qs_rdp(struct rcu_data *rdp)
2256 {
2257 	unsigned long flags;
2258 	unsigned long mask;
2259 	bool needwake = false;
2260 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2261 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2262 	struct rcu_node *rnp;
2263 
2264 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2265 	rnp = rdp->mynode;
2266 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2267 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2268 	    rdp->gpwrap) {
2269 
2270 		/*
2271 		 * The grace period in which this quiescent state was
2272 		 * recorded has ended, so don't report it upwards.
2273 		 * We will instead need a new quiescent state that lies
2274 		 * within the current grace period.
2275 		 */
2276 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2277 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2278 		return;
2279 	}
2280 	mask = rdp->grpmask;
2281 	rdp->core_needs_qs = false;
2282 	if ((rnp->qsmask & mask) == 0) {
2283 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2284 	} else {
2285 		/*
2286 		 * This GP can't end until this CPU checks in, so all of our
2287 		 * callbacks can be processed during the next GP.
2288 		 */
2289 		if (!offloaded)
2290 			needwake = rcu_accelerate_cbs(rnp, rdp);
2291 
2292 		rcu_disable_urgency_upon_qs(rdp);
2293 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2294 		/* ^^^ Released rnp->lock */
2295 		if (needwake)
2296 			rcu_gp_kthread_wake();
2297 	}
2298 }
2299 
2300 /*
2301  * Check to see if there is a new grace period of which this CPU
2302  * is not yet aware, and if so, set up local rcu_data state for it.
2303  * Otherwise, see if this CPU has just passed through its first
2304  * quiescent state for this grace period, and record that fact if so.
2305  */
2306 static void
2307 rcu_check_quiescent_state(struct rcu_data *rdp)
2308 {
2309 	/* Check for grace-period ends and beginnings. */
2310 	note_gp_changes(rdp);
2311 
2312 	/*
2313 	 * Does this CPU still need to do its part for current grace period?
2314 	 * If no, return and let the other CPUs do their part as well.
2315 	 */
2316 	if (!rdp->core_needs_qs)
2317 		return;
2318 
2319 	/*
2320 	 * Was there a quiescent state since the beginning of the grace
2321 	 * period? If no, then exit and wait for the next call.
2322 	 */
2323 	if (rdp->cpu_no_qs.b.norm)
2324 		return;
2325 
2326 	/*
2327 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2328 	 * judge of that).
2329 	 */
2330 	rcu_report_qs_rdp(rdp);
2331 }
2332 
2333 /*
2334  * Near the end of the offline process.  Trace the fact that this CPU
2335  * is going offline.
2336  */
2337 int rcutree_dying_cpu(unsigned int cpu)
2338 {
2339 	bool blkd;
2340 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2341 	struct rcu_node *rnp = rdp->mynode;
2342 
2343 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2344 		return 0;
2345 
2346 	blkd = !!(rnp->qsmask & rdp->grpmask);
2347 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2348 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2349 	return 0;
2350 }
2351 
2352 /*
2353  * All CPUs for the specified rcu_node structure have gone offline,
2354  * and all tasks that were preempted within an RCU read-side critical
2355  * section while running on one of those CPUs have since exited their RCU
2356  * read-side critical section.  Some other CPU is reporting this fact with
2357  * the specified rcu_node structure's ->lock held and interrupts disabled.
2358  * This function therefore goes up the tree of rcu_node structures,
2359  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2360  * the leaf rcu_node structure's ->qsmaskinit field has already been
2361  * updated.
2362  *
2363  * This function does check that the specified rcu_node structure has
2364  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2365  * prematurely.  That said, invoking it after the fact will cost you
2366  * a needless lock acquisition.  So once it has done its work, don't
2367  * invoke it again.
2368  */
2369 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2370 {
2371 	long mask;
2372 	struct rcu_node *rnp = rnp_leaf;
2373 
2374 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2375 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2376 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2377 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2378 		return;
2379 	for (;;) {
2380 		mask = rnp->grpmask;
2381 		rnp = rnp->parent;
2382 		if (!rnp)
2383 			break;
2384 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2385 		rnp->qsmaskinit &= ~mask;
2386 		/* Between grace periods, so better already be zero! */
2387 		WARN_ON_ONCE(rnp->qsmask);
2388 		if (rnp->qsmaskinit) {
2389 			raw_spin_unlock_rcu_node(rnp);
2390 			/* irqs remain disabled. */
2391 			return;
2392 		}
2393 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2394 	}
2395 }
2396 
2397 /*
2398  * The CPU has been completely removed, and some other CPU is reporting
2399  * this fact from process context.  Do the remainder of the cleanup.
2400  * There can only be one CPU hotplug operation at a time, so no need for
2401  * explicit locking.
2402  */
2403 int rcutree_dead_cpu(unsigned int cpu)
2404 {
2405 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2406 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2407 
2408 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2409 		return 0;
2410 
2411 	/* Adjust any no-longer-needed kthreads. */
2412 	rcu_boost_kthread_setaffinity(rnp, -1);
2413 	/* Do any needed no-CB deferred wakeups from this CPU. */
2414 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2415 
2416 	// Stop-machine done, so allow nohz_full to disable tick.
2417 	tick_dep_clear(TICK_DEP_BIT_RCU);
2418 	return 0;
2419 }
2420 
2421 /*
2422  * Invoke any RCU callbacks that have made it to the end of their grace
2423  * period.  Throttle as specified by rdp->blimit.
2424  */
2425 static void rcu_do_batch(struct rcu_data *rdp)
2426 {
2427 	int div;
2428 	unsigned long flags;
2429 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2430 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2431 	struct rcu_head *rhp;
2432 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2433 	long bl, count;
2434 	long pending, tlimit = 0;
2435 
2436 	/* If no callbacks are ready, just return. */
2437 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2438 		trace_rcu_batch_start(rcu_state.name,
2439 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2440 		trace_rcu_batch_end(rcu_state.name, 0,
2441 				    !rcu_segcblist_empty(&rdp->cblist),
2442 				    need_resched(), is_idle_task(current),
2443 				    rcu_is_callbacks_kthread());
2444 		return;
2445 	}
2446 
2447 	/*
2448 	 * Extract the list of ready callbacks, disabling to prevent
2449 	 * races with call_rcu() from interrupt handlers.  Leave the
2450 	 * callback counts, as rcu_barrier() needs to be conservative.
2451 	 */
2452 	local_irq_save(flags);
2453 	rcu_nocb_lock(rdp);
2454 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2455 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2456 	div = READ_ONCE(rcu_divisor);
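	/*
	 * Clamp the batch-limit divisor: a negative rcu_divisor selects the
	 * default shift of 7, and overly large values are capped so that the
	 * shift below stays within the width of an unsigned long.
	 */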
2457 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2458 	bl = max(rdp->blimit, pending >> div);
2459 	if (unlikely(bl > 100)) {
2460 		long rrn = READ_ONCE(rcu_resched_ns);
2461 
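		/*
		 * Large batch: also set a time limit, clamping the configured
		 * rcu_resched_ns interval to between one millisecond and one
		 * second before adding it to the current time.
		 */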
2462 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2463 		tlimit = local_clock() + rrn;
2464 	}
2465 	trace_rcu_batch_start(rcu_state.name,
2466 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2467 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2468 	if (offloaded)
2469 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2470 	rcu_nocb_unlock_irqrestore(rdp, flags);
2471 
2472 	/* Invoke callbacks. */
2473 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2474 	rhp = rcu_cblist_dequeue(&rcl);
2475 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2476 		rcu_callback_t f;
2477 
2478 		debug_rcu_head_unqueue(rhp);
2479 
2480 		rcu_lock_acquire(&rcu_callback_map);
2481 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2482 
2483 		f = rhp->func;
2484 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2485 		f(rhp);
2486 
2487 		rcu_lock_release(&rcu_callback_map);
2488 
2489 		/*
2490 		 * Stop only if limit reached and CPU has something to do.
2491 		 * Note: The rcl structure counts down from zero.
2492 		 */
2493 		if (-rcl.len >= bl && !offloaded &&
2494 		    (need_resched() ||
2495 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2496 			break;
2497 		if (unlikely(tlimit)) {
2498 			/* only call local_clock() every 32 callbacks */
2499 			if (likely((-rcl.len & 31) || local_clock() < tlimit))
2500 				continue;
2501 			/* Exceeded the time limit, so leave. */
2502 			break;
2503 		}
2504 		if (offloaded) {
2505 			WARN_ON_ONCE(in_serving_softirq());
2506 			local_bh_enable();
2507 			lockdep_assert_irqs_enabled();
2508 			cond_resched_tasks_rcu_qs();
2509 			lockdep_assert_irqs_enabled();
2510 			local_bh_disable();
2511 		}
2512 	}
2513 
2514 	local_irq_save(flags);
2515 	rcu_nocb_lock(rdp);
2516 	count = -rcl.len;
2517 	rdp->n_cbs_invoked += count;
2518 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2519 			    is_idle_task(current), rcu_is_callbacks_kthread());
2520 
2521 	/* Update counts and requeue any remaining callbacks. */
2522 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2523 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2524 	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2525 
2526 	/* Reinstate batch limit if we have worked down the excess. */
2527 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2528 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2529 		rdp->blimit = blimit;
2530 
2531 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2532 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2533 		rdp->qlen_last_fqs_check = 0;
2534 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2535 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2536 		rdp->qlen_last_fqs_check = count;
2537 
2538 	/*
2539 	 * The following usually indicates a double call_rcu().  To track
2540 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2541 	 */
2542 	WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2543 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2544 		     count != 0 && rcu_segcblist_empty(&rdp->cblist));
2545 
2546 	rcu_nocb_unlock_irqrestore(rdp, flags);
2547 
2548 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2549 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2550 		invoke_rcu_core();
2551 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2552 }
2553 
2554 /*
2555  * This function is invoked from each scheduling-clock interrupt,
2556  * and checks to see if this CPU is in a non-context-switch quiescent
2557  * state, for example, user mode or idle loop.  It also schedules RCU
2558  * core processing.  If the current grace period has gone on too long,
2559  * it will ask the scheduler to manufacture a context switch for the sole
2560  * purpose of providing the needed quiescent state.
2561  */
2562 void rcu_sched_clock_irq(int user)
2563 {
2564 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2565 	lockdep_assert_irqs_disabled();
2566 	raw_cpu_inc(rcu_data.ticks_this_gp);
2567 	/* The load-acquire pairs with the store-release setting to true. */
2568 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2569 		/* Idle and userspace execution already are quiescent states. */
2570 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2571 			set_tsk_need_resched(current);
2572 			set_preempt_need_resched();
2573 		}
2574 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2575 	}
2576 	rcu_flavor_sched_clock_irq(user);
2577 	if (rcu_pending(user))
2578 		invoke_rcu_core();
2579 	lockdep_assert_irqs_disabled();
2580 
2581 	trace_rcu_utilization(TPS("End scheduler-tick"));
2582 }
2583 
2584 /*
2585  * Scan the leaf rcu_node structures.  For each structure on which all
2586  * CPUs have reported a quiescent state and on which there are tasks
2587  * blocking the current grace period, initiate RCU priority boosting.
2588  * Otherwise, invoke the specified function to check dyntick state for
2589  * each CPU that has not yet reported a quiescent state.
2590  */
2591 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2592 {
2593 	int cpu;
2594 	unsigned long flags;
2595 	unsigned long mask;
2596 	struct rcu_data *rdp;
2597 	struct rcu_node *rnp;
2598 
2599 	rcu_state.cbovld = rcu_state.cbovldnext;
2600 	rcu_state.cbovldnext = false;
2601 	rcu_for_each_leaf_node(rnp) {
2602 		cond_resched_tasks_rcu_qs();
2603 		mask = 0;
2604 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2605 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2606 		if (rnp->qsmask == 0) {
2607 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2608 				/*
2609 				 * No point in scanning bits because they
2610 				 * are all zero.  But we might need to
2611 				 * priority-boost blocked readers.
2612 				 */
2613 				rcu_initiate_boost(rnp, flags);
2614 				/* rcu_initiate_boost() releases rnp->lock */
2615 				continue;
2616 			}
2617 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2618 			continue;
2619 		}
2620 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2621 			rdp = per_cpu_ptr(&rcu_data, cpu);
2622 			if (f(rdp)) {
2623 				mask |= rdp->grpmask;
2624 				rcu_disable_urgency_upon_qs(rdp);
2625 			}
2626 		}
2627 		if (mask != 0) {
2628 			/* Idle/offline CPUs, report (releases rnp->lock). */
2629 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2630 		} else {
2631 			/* Nothing to do here, so just drop the lock. */
2632 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2633 		}
2634 	}
2635 }
2636 
2637 /*
2638  * Force quiescent states on reluctant CPUs, and also detect which
2639  * CPUs are in dyntick-idle mode.
2640  */
2641 void rcu_force_quiescent_state(void)
2642 {
2643 	unsigned long flags;
2644 	bool ret;
2645 	struct rcu_node *rnp;
2646 	struct rcu_node *rnp_old = NULL;
2647 
2648 	/* Funnel through hierarchy to reduce memory contention. */
2649 	rnp = __this_cpu_read(rcu_data.mynode);
2650 	for (; rnp != NULL; rnp = rnp->parent) {
2651 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2652 		       !raw_spin_trylock(&rnp->fqslock);
2653 		if (rnp_old != NULL)
2654 			raw_spin_unlock(&rnp_old->fqslock);
2655 		if (ret)
2656 			return;
2657 		rnp_old = rnp;
2658 	}
2659 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2660 
2661 	/* Reached the root of the rcu_node tree, acquire lock. */
2662 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2663 	raw_spin_unlock(&rnp_old->fqslock);
2664 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2665 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2666 		return;  /* Someone beat us to it. */
2667 	}
2668 	WRITE_ONCE(rcu_state.gp_flags,
2669 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2670 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2671 	rcu_gp_kthread_wake();
2672 }
2673 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2674 
2675 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2676 // grace periods.
2677 static void strict_work_handler(struct work_struct *work)
2678 {
2679 	rcu_read_lock();
2680 	rcu_read_unlock();
2681 }
2682 
2683 /* Perform RCU core processing work for the current CPU.  */
2684 static __latent_entropy void rcu_core(void)
2685 {
2686 	unsigned long flags;
2687 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2688 	struct rcu_node *rnp = rdp->mynode;
2689 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2690 			       rcu_segcblist_is_offloaded(&rdp->cblist);
2691 
2692 	if (cpu_is_offline(smp_processor_id()))
2693 		return;
2694 	trace_rcu_utilization(TPS("Start RCU core"));
2695 	WARN_ON_ONCE(!rdp->beenonline);
2696 
2697 	/* Report any deferred quiescent states if preemption enabled. */
2698 	if (!(preempt_count() & PREEMPT_MASK)) {
2699 		rcu_preempt_deferred_qs(current);
2700 	} else if (rcu_preempt_need_deferred_qs(current)) {
2701 		set_tsk_need_resched(current);
2702 		set_preempt_need_resched();
2703 	}
2704 
2705 	/* Update RCU state based on any recent quiescent states. */
2706 	rcu_check_quiescent_state(rdp);
2707 
2708 	/* No grace period and unregistered callbacks? */
2709 	if (!rcu_gp_in_progress() &&
2710 	    rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2711 		local_irq_save(flags);
2712 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2713 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2714 		local_irq_restore(flags);
2715 	}
2716 
2717 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2718 
2719 	/* If there are callbacks ready, invoke them. */
2720 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2721 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2722 		rcu_do_batch(rdp);
2723 
2724 	/* Do any needed deferred wakeups of rcuo kthreads. */
2725 	do_nocb_deferred_wakeup(rdp);
2726 	trace_rcu_utilization(TPS("End RCU core"));
2727 
2728 	// If strict GPs, schedule an RCU reader in a clean environment.
2729 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2730 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2731 }
2732 
2733 static void rcu_core_si(struct softirq_action *h)
2734 {
2735 	rcu_core();
2736 }
2737 
2738 static void rcu_wake_cond(struct task_struct *t, int status)
2739 {
2740 	/*
2741 	 * If the thread is yielding, only wake it when this
2742 	 * is invoked from idle
2743 	 */
2744 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2745 		wake_up_process(t);
2746 }
2747 
2748 static void invoke_rcu_core_kthread(void)
2749 {
2750 	struct task_struct *t;
2751 	unsigned long flags;
2752 
2753 	local_irq_save(flags);
2754 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2755 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2756 	if (t != NULL && t != current)
2757 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2758 	local_irq_restore(flags);
2759 }
2760 
2761 /*
2762  * Wake up this CPU's rcuc kthread to do RCU core processing.
2763  */
2764 static void invoke_rcu_core(void)
2765 {
2766 	if (!cpu_online(smp_processor_id()))
2767 		return;
2768 	if (use_softirq)
2769 		raise_softirq(RCU_SOFTIRQ);
2770 	else
2771 		invoke_rcu_core_kthread();
2772 }
2773 
2774 static void rcu_cpu_kthread_park(unsigned int cpu)
2775 {
2776 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2777 }
2778 
2779 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2780 {
2781 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2782 }
2783 
2784 /*
2785  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2786  * the RCU softirq used in configurations of RCU that do not support RCU
2787  * priority boosting.
2788  */
2789 static void rcu_cpu_kthread(unsigned int cpu)
2790 {
2791 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2792 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2793 	int spincnt;
2794 
2795 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2796 	for (spincnt = 0; spincnt < 10; spincnt++) {
2797 		local_bh_disable();
2798 		*statusp = RCU_KTHREAD_RUNNING;
2799 		local_irq_disable();
2800 		work = *workp;
2801 		*workp = 0;
2802 		local_irq_enable();
2803 		if (work)
2804 			rcu_core();
2805 		local_bh_enable();
2806 		if (*workp == 0) {
2807 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2808 			*statusp = RCU_KTHREAD_WAITING;
2809 			return;
2810 		}
2811 	}
2812 	*statusp = RCU_KTHREAD_YIELDING;
2813 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2814 	schedule_timeout_idle(2);
2815 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2816 	*statusp = RCU_KTHREAD_WAITING;
2817 }
2818 
2819 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2820 	.store			= &rcu_data.rcu_cpu_kthread_task,
2821 	.thread_should_run	= rcu_cpu_kthread_should_run,
2822 	.thread_fn		= rcu_cpu_kthread,
2823 	.thread_comm		= "rcuc/%u",
2824 	.setup			= rcu_cpu_kthread_setup,
2825 	.park			= rcu_cpu_kthread_park,
2826 };
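
/*
 * The specification above is handed to smpboot_register_percpu_thread()
 * by rcu_spawn_core_kthreads() below, creating one "rcuc/%u" kthread per
 * CPU whenever RCU_BOOST is enabled or use_softirq is clear.
 */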
2827 
2828 /*
2829  * Spawn per-CPU RCU core processing kthreads.
2830  */
2831 static int __init rcu_spawn_core_kthreads(void)
2832 {
2833 	int cpu;
2834 
2835 	for_each_possible_cpu(cpu)
2836 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2837 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2838 		return 0;
2839 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2840 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2841 	return 0;
2842 }
2843 
2844 /*
2845  * Handle any core-RCU processing required by a call_rcu() invocation.
2846  */
2847 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2848 			    unsigned long flags)
2849 {
2850 	/*
2851 	 * If called from an extended quiescent state, invoke the RCU
2852 	 * core in order to force a re-evaluation of RCU's idleness.
2853 	 */
2854 	if (!rcu_is_watching())
2855 		invoke_rcu_core();
2856 
2857 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2858 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2859 		return;
2860 
2861 	/*
2862 	 * Force the grace period if too many callbacks or too long waiting.
2863 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2864 	 * if some other CPU has recently done so.  Also, don't bother
2865 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2866 	 * is the only one waiting for a grace period to complete.
2867 	 */
2868 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2869 		     rdp->qlen_last_fqs_check + qhimark)) {
2870 
2871 		/* Are we ignoring a completed grace period? */
2872 		note_gp_changes(rdp);
2873 
2874 		/* Start a new grace period if one not already started. */
2875 		if (!rcu_gp_in_progress()) {
2876 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2877 		} else {
2878 			/* Give the grace period a kick. */
2879 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2880 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2881 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2882 				rcu_force_quiescent_state();
2883 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2884 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2885 		}
2886 	}
2887 }
2888 
2889 /*
2890  * RCU callback function to leak a callback.
2891  */
2892 static void rcu_leak_callback(struct rcu_head *rhp)
2893 {
2894 }
2895 
2896 /*
2897  * Check and if necessary update the leaf rcu_node structure's
2898  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2899  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2900  * structure's ->lock.
2901  */
2902 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2903 {
2904 	raw_lockdep_assert_held_rcu_node(rnp);
2905 	if (qovld_calc <= 0)
2906 		return; // Early boot and wildcard value set.
2907 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2908 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2909 	else
2910 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2911 }
2912 
2913 /*
2914  * Check and if necessary update the leaf rcu_node structure's
2915  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2916  * number of queued RCU callbacks.  No locks need be held, but the
2917  * caller must have disabled interrupts.
2918  *
2919  * Note that this function ignores the possibility that there are a lot
2920  * of callbacks all of which have already seen the end of their respective
2921  * grace periods.  This omission is due to the need for no-CBs CPUs to
2922  * be holding ->nocb_lock to do this check, which is too heavy for a
2923  * common-case operation.
2924  */
2925 static void check_cb_ovld(struct rcu_data *rdp)
2926 {
2927 	struct rcu_node *const rnp = rdp->mynode;
2928 
2929 	if (qovld_calc <= 0 ||
2930 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2931 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2932 		return; // Early boot wildcard value or already set correctly.
2933 	raw_spin_lock_rcu_node(rnp);
2934 	check_cb_ovld_locked(rdp, rnp);
2935 	raw_spin_unlock_rcu_node(rnp);
2936 }
2937 
2938 /* Helper function for call_rcu() and friends.  */
2939 static void
2940 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2941 {
2942 	unsigned long flags;
2943 	struct rcu_data *rdp;
2944 	bool was_alldone;
2945 
2946 	/* Misaligned rcu_head! */
2947 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2948 
2949 	if (debug_rcu_head_queue(head)) {
2950 		/*
2951 		 * Probable double call_rcu(), so leak the callback.
2952 		 * Use the rcu:rcu_callback trace event to find the previous
2953 		 * time the callback was passed to __call_rcu().
2954 		 */
2955 		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2956 			  head, head->func);
2957 		WRITE_ONCE(head->func, rcu_leak_callback);
2958 		return;
2959 	}
2960 	head->func = func;
2961 	head->next = NULL;
2962 	local_irq_save(flags);
2963 	kasan_record_aux_stack(head);
2964 	rdp = this_cpu_ptr(&rcu_data);
2965 
2966 	/* Add the callback to our list. */
2967 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2968 		// This can trigger due to call_rcu() from offline CPU:
2969 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2970 		WARN_ON_ONCE(!rcu_is_watching());
2971 		// Very early boot, before rcu_init().  Initialize if needed
2972 		// and then drop through to queue the callback.
2973 		if (rcu_segcblist_empty(&rdp->cblist))
2974 			rcu_segcblist_init(&rdp->cblist);
2975 	}
2976 
2977 	check_cb_ovld(rdp);
2978 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2979 		return; // Enqueued onto ->nocb_bypass, so just leave.
2980 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2981 	rcu_segcblist_enqueue(&rdp->cblist, head);
2982 	if (__is_kvfree_rcu_offset((unsigned long)func))
2983 		trace_rcu_kvfree_callback(rcu_state.name, head,
2984 					 (unsigned long)func,
2985 					 rcu_segcblist_n_cbs(&rdp->cblist));
2986 	else
2987 		trace_rcu_callback(rcu_state.name, head,
2988 				   rcu_segcblist_n_cbs(&rdp->cblist));
2989 
2990 	/* Go handle any RCU core processing required. */
2991 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2992 	    unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
2993 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2994 	} else {
2995 		__call_rcu_core(rdp, head, flags);
2996 		local_irq_restore(flags);
2997 	}
2998 }
2999 
3000 /**
3001  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3002  * @head: structure to be used for queueing the RCU updates.
3003  * @func: actual callback function to be invoked after the grace period
3004  *
3005  * The callback function will be invoked some time after a full grace
3006  * period elapses, in other words after all pre-existing RCU read-side
3007  * critical sections have completed.  However, the callback function
3008  * might well execute concurrently with RCU read-side critical sections
3009  * that started after call_rcu() was invoked.  RCU read-side critical
3010  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3011  * may be nested.  In addition, regions of code across which interrupts,
3012  * preemption, or softirqs have been disabled also serve as RCU read-side
3013  * critical sections.  This includes hardware interrupt handlers, softirq
3014  * handlers, and NMI handlers.
3015  *
3016  * Note that all CPUs must agree that the grace period extended beyond
3017  * all pre-existing RCU read-side critical sections.  On systems with more
3018  * than one CPU, this means that when "func()" is invoked, each CPU is
3019  * guaranteed to have executed a full memory barrier since the end of its
3020  * last RCU read-side critical section whose beginning preceded the call
3021  * to call_rcu().  It also means that each CPU executing an RCU read-side
3022  * critical section that continues beyond the start of "func()" must have
3023  * executed a memory barrier after the call_rcu() but before the beginning
3024  * of that RCU read-side critical section.  Note that these guarantees
3025  * include CPUs that are offline, idle, or executing in user mode, as
3026  * well as CPUs that are executing in the kernel.
3027  *
3028  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3029  * resulting RCU callback function "func()", then both CPU A and CPU B are
3030  * guaranteed to execute a full memory barrier during the time interval
3031  * between the call to call_rcu() and the invocation of "func()" -- even
3032  * if CPU A and CPU B are the same CPU (but again only if the system has
3033  * more than one CPU).
3034  */
3035 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3036 {
3037 	__call_rcu(head, func);
3038 }
3039 EXPORT_SYMBOL_GPL(call_rcu);
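
/*
 * Editor's note: a minimal usage sketch for call_rcu().  The type and
 * function names below (struct foo, foo_reclaim(), foo_replace()) are
 * hypothetical and are not part of this file.
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	// Publish a new version and defer freeing of the old one until
 *	// all pre-existing readers are done.
 *	void foo_replace(struct foo __rcu **slot, struct foo *new_fp)
 *	{
 *		struct foo *old_fp = rcu_dereference_protected(*slot, 1);
 *
 *		rcu_assign_pointer(*slot, new_fp);
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 */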
3040 
3041 
3042 /* Maximum number of jiffies to wait before draining a batch. */
3043 #define KFREE_DRAIN_JIFFIES (HZ / 50)
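/*
 * Editor's note: HZ / 50 corresponds to roughly 20 ms of wall-clock time
 * regardless of the configured HZ, so a batch is drained at most about
 * 50 times per second.
 */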
3044 #define KFREE_N_BATCHES 2
3045 #define FREE_N_CHANNELS 2
3046 
3047 /**
3048  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3049  * @nr_records: Number of active pointers in the array
3050  * @next: Next bulk object in the block chain
3051  * @records: Array of the kvfree_rcu() pointers
3052  */
3053 struct kvfree_rcu_bulk_data {
3054 	unsigned long nr_records;
3055 	struct kvfree_rcu_bulk_data *next;
3056 	void *records[];
3057 };
3058 
3059 /*
3060  * This macro defines how many entries the "records" array
3061  * will contain. It is based on the fact that the size of
3062  * kvfree_rcu_bulk_data structure becomes exactly one page.
3063  */
3064 #define KVFREE_BULK_MAX_ENTR \
3065 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
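
/*
 * Editor's note: a worked example of the sizing above, assuming 4 KiB pages
 * and 64-bit pointers.  The header (nr_records plus next) occupies 16 bytes,
 * so KVFREE_BULK_MAX_ENTR = (4096 - 16) / 8 = 510 pointers per block.
 */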
3066 
3067 /**
3068  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3069  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3070  * @head_free: List of kfree_rcu() objects waiting for a grace period
3071  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3072  * @krcp: Pointer to @kfree_rcu_cpu structure
3073  */
3074 
3075 struct kfree_rcu_cpu_work {
3076 	struct rcu_work rcu_work;
3077 	struct rcu_head *head_free;
3078 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3079 	struct kfree_rcu_cpu *krcp;
3080 };
3081 
3082 /**
3083  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3084  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3085  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3086  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3087  * @lock: Synchronize access to this structure
3088  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3089  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3090  * @initialized: The @rcu_work fields have been initialized
3091  * @count: Number of objects for which GP not started
3092  * @bkvcache:
3093  *	A simple cache list that contains objects for reuse.
3094  *	To save some per-CPU space the list is singly linked (llist).
3095  *	Even though the llist itself is lockless, access has to be
3096  *	protected by the per-CPU lock.
3097  * @page_cache_work: A work to refill the cache when it is empty
3098  * @work_in_progress: Indicates that page_cache_work is running
3099  * @hrtimer: A hrtimer for scheduling a page_cache_work
3100  * @nr_bkv_objs: number of allocated objects in @bkvcache.
3101  *
3102  * This is a per-CPU structure.  The reason that it is not included in
3103  * the rcu_data structure is to permit this code to be extracted from
3104  * the RCU files.  Such extraction could allow further optimization of
3105  * the interactions with the slab allocators.
3106  */
3107 struct kfree_rcu_cpu {
3108 	struct rcu_head *head;
3109 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3110 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3111 	raw_spinlock_t lock;
3112 	struct delayed_work monitor_work;
3113 	bool monitor_todo;
3114 	bool initialized;
3115 	int count;
3116 
3117 	struct work_struct page_cache_work;
3118 	atomic_t work_in_progress;
3119 	struct hrtimer hrtimer;
3120 
3121 	struct llist_head bkvcache;
3122 	int nr_bkv_objs;
3123 };
3124 
3125 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3126 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3127 };
3128 
3129 static __always_inline void
3130 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3131 {
3132 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3133 	int i;
3134 
3135 	for (i = 0; i < bhead->nr_records; i++)
3136 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3137 #endif
3138 }
3139 
3140 static inline struct kfree_rcu_cpu *
3141 krc_this_cpu_lock(unsigned long *flags)
3142 {
3143 	struct kfree_rcu_cpu *krcp;
3144 
3145 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3146 	krcp = this_cpu_ptr(&krc);
3147 	raw_spin_lock(&krcp->lock);
3148 
3149 	return krcp;
3150 }
3151 
3152 static inline void
3153 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3154 {
3155 	raw_spin_unlock(&krcp->lock);
3156 	local_irq_restore(flags);
3157 }
3158 
3159 static inline struct kvfree_rcu_bulk_data *
3160 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3161 {
3162 	if (!krcp->nr_bkv_objs)
3163 		return NULL;
3164 
3165 	krcp->nr_bkv_objs--;
3166 	return (struct kvfree_rcu_bulk_data *)
3167 		llist_del_first(&krcp->bkvcache);
3168 }
3169 
3170 static inline bool
3171 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3172 	struct kvfree_rcu_bulk_data *bnode)
3173 {
3174 	// Check the limit.
3175 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3176 		return false;
3177 
3178 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3179 	krcp->nr_bkv_objs++;
3180 	return true;
3181 
3182 }
3183 
3184 /*
3185  * This function is invoked in workqueue context after a grace period.
3186  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3187  */
3188 static void kfree_rcu_work(struct work_struct *work)
3189 {
3190 	unsigned long flags;
3191 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3192 	struct rcu_head *head, *next;
3193 	struct kfree_rcu_cpu *krcp;
3194 	struct kfree_rcu_cpu_work *krwp;
3195 	int i, j;
3196 
3197 	krwp = container_of(to_rcu_work(work),
3198 			    struct kfree_rcu_cpu_work, rcu_work);
3199 	krcp = krwp->krcp;
3200 
3201 	raw_spin_lock_irqsave(&krcp->lock, flags);
3202 	// Channels 1 and 2.
3203 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3204 		bkvhead[i] = krwp->bkvhead_free[i];
3205 		krwp->bkvhead_free[i] = NULL;
3206 	}
3207 
3208 	// Channel 3.
3209 	head = krwp->head_free;
3210 	krwp->head_free = NULL;
3211 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3212 
3213 	// Handle the first two channels.
3214 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3215 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3216 			bnext = bkvhead[i]->next;
3217 			debug_rcu_bhead_unqueue(bkvhead[i]);
3218 
3219 			rcu_lock_acquire(&rcu_callback_map);
3220 			if (i == 0) { // kmalloc() / kfree().
3221 				trace_rcu_invoke_kfree_bulk_callback(
3222 					rcu_state.name, bkvhead[i]->nr_records,
3223 					bkvhead[i]->records);
3224 
3225 				kfree_bulk(bkvhead[i]->nr_records,
3226 					bkvhead[i]->records);
3227 			} else { // vmalloc() / vfree().
3228 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3229 					trace_rcu_invoke_kvfree_callback(
3230 						rcu_state.name,
3231 						bkvhead[i]->records[j], 0);
3232 
3233 					vfree(bkvhead[i]->records[j]);
3234 				}
3235 			}
3236 			rcu_lock_release(&rcu_callback_map);
3237 
3238 			raw_spin_lock_irqsave(&krcp->lock, flags);
3239 			if (put_cached_bnode(krcp, bkvhead[i]))
3240 				bkvhead[i] = NULL;
3241 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3242 
3243 			if (bkvhead[i])
3244 				free_page((unsigned long) bkvhead[i]);
3245 
3246 			cond_resched_tasks_rcu_qs();
3247 		}
3248 	}
3249 
3250 	/*
3251 	 * Emergency case only.  It can happen under low-memory
3252 	 * conditions when an allocation fails, so the "bulk"
3253 	 * path temporarily cannot be maintained.
3254 	 */
3255 	for (; head; head = next) {
3256 		unsigned long offset = (unsigned long)head->func;
3257 		void *ptr = (void *)head - offset;
3258 
3259 		next = head->next;
3260 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3261 		rcu_lock_acquire(&rcu_callback_map);
3262 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3263 
3264 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3265 			kvfree(ptr);
3266 
3267 		rcu_lock_release(&rcu_callback_map);
3268 		cond_resched_tasks_rcu_qs();
3269 	}
3270 }
3271 
3272 /*
3273  * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3274  *
3275  * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3276  * timeout has been reached.
3277  */
3278 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3279 {
3280 	struct kfree_rcu_cpu_work *krwp;
3281 	bool repeat = false;
3282 	int i, j;
3283 
3284 	lockdep_assert_held(&krcp->lock);
3285 
3286 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3287 		krwp = &(krcp->krw_arr[i]);
3288 
3289 		/*
3290 		 * Try to detach bkvhead or head and attach it to the
3291 		 * corresponding free channel, if that channel is available.
3292 		 * If a previous RCU batch is still in progress, another one
3293 		 * cannot be queued immediately, so return false to tell the
3294 		 * caller to retry.
3295 		 */
3296 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3297 			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3298 				(krcp->head && !krwp->head_free)) {
3299 			// Channel 1 corresponds to SLAB ptrs.
3300 			// Channel 2 corresponds to vmalloc ptrs.
3301 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3302 				if (!krwp->bkvhead_free[j]) {
3303 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3304 					krcp->bkvhead[j] = NULL;
3305 				}
3306 			}
3307 
3308 			// Channel 3 corresponds to emergency path.
3309 			if (!krwp->head_free) {
3310 				krwp->head_free = krcp->head;
3311 				krcp->head = NULL;
3312 			}
3313 
3314 			WRITE_ONCE(krcp->count, 0);
3315 
3316 			/*
3317 			 * There is one work item per batch, and each batch
3318 			 * handles three "free channels".  The work item may
3319 			 * already be pending if the channels were detached
3320 			 * one after another.
3322 			 */
3323 			queue_rcu_work(system_wq, &krwp->rcu_work);
3324 		}
3325 
3326 		// Repeat if any "free" corresponding channel is still busy.
3327 		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3328 			repeat = true;
3329 	}
3330 
3331 	return !repeat;
3332 }
3333 
3334 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3335 					  unsigned long flags)
3336 {
3337 	// Attempt to start a new batch.
3338 	krcp->monitor_todo = false;
3339 	if (queue_kfree_rcu_work(krcp)) {
3340 		// Success! Our job is done here.
3341 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3342 		return;
3343 	}
3344 
3345 	// Previous RCU batch still in progress, try again later.
3346 	krcp->monitor_todo = true;
3347 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3348 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3349 }
3350 
3351 /*
3352  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3353  * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3354  */
3355 static void kfree_rcu_monitor(struct work_struct *work)
3356 {
3357 	unsigned long flags;
3358 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3359 						 monitor_work.work);
3360 
3361 	raw_spin_lock_irqsave(&krcp->lock, flags);
3362 	if (krcp->monitor_todo)
3363 		kfree_rcu_drain_unlock(krcp, flags);
3364 	else
3365 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3366 }
3367 
3368 static enum hrtimer_restart
3369 schedule_page_work_fn(struct hrtimer *t)
3370 {
3371 	struct kfree_rcu_cpu *krcp =
3372 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3373 
3374 	queue_work(system_highpri_wq, &krcp->page_cache_work);
3375 	return HRTIMER_NORESTART;
3376 }
3377 
3378 static void fill_page_cache_func(struct work_struct *work)
3379 {
3380 	struct kvfree_rcu_bulk_data *bnode;
3381 	struct kfree_rcu_cpu *krcp =
3382 		container_of(work, struct kfree_rcu_cpu,
3383 			page_cache_work);
3384 	unsigned long flags;
3385 	bool pushed;
3386 	int i;
3387 
3388 	for (i = 0; i < rcu_min_cached_objs; i++) {
3389 		bnode = (struct kvfree_rcu_bulk_data *)
3390 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3391 
3392 		if (bnode) {
3393 			raw_spin_lock_irqsave(&krcp->lock, flags);
3394 			pushed = put_cached_bnode(krcp, bnode);
3395 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3396 
3397 			if (!pushed) {
3398 				free_page((unsigned long) bnode);
3399 				break;
3400 			}
3401 		}
3402 	}
3403 
3404 	atomic_set(&krcp->work_in_progress, 0);
3405 }
3406 
3407 static void
3408 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3409 {
3410 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3411 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3412 		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
3413 			HRTIMER_MODE_REL);
3414 		krcp->hrtimer.function = schedule_page_work_fn;
3415 		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3416 	}
3417 }
3418 
3419 static inline bool
3420 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3421 {
3422 	struct kvfree_rcu_bulk_data *bnode;
3423 	int idx;
3424 
3425 	if (unlikely(!krcp->initialized))
3426 		return false;
3427 
3428 	lockdep_assert_held(&krcp->lock);
3429 	idx = !!is_vmalloc_addr(ptr);
3430 
3431 	/* Check if a new block is required. */
3432 	if (!krcp->bkvhead[idx] ||
3433 			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3434 		bnode = get_cached_bnode(krcp);
3435 		/* Switch to emergency path. */
3436 		if (!bnode)
3437 			return false;
3438 
3439 		/* Initialize the new block. */
3440 		bnode->nr_records = 0;
3441 		bnode->next = krcp->bkvhead[idx];
3442 
3443 		/* Attach it to the head. */
3444 		krcp->bkvhead[idx] = bnode;
3445 	}
3446 
3447 	/* Finally insert. */
3448 	krcp->bkvhead[idx]->records
3449 		[krcp->bkvhead[idx]->nr_records++] = ptr;
3450 
3451 	return true;
3452 }
3453 
3454 /*
3455  * Queue a request for lazy invocation of the appropriate free routine
3456  * after a grace period.  Three paths are maintained: the two main ones
3457  * use the array-of-pointers interface, and the third is an emergency
3458  * path used only when the main paths cannot temporarily be maintained
3459  * due to memory pressure.
3460  *
3461  * Each kvfree_call_rcu() request is added to a batch.  The batch is drained
3462  * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3463  * in workqueue context.  Batching requests together reduces the number of
3464  * grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3465  */
3466 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3467 {
3468 	unsigned long flags;
3469 	struct kfree_rcu_cpu *krcp;
3470 	bool success;
3471 	void *ptr;
3472 
3473 	if (head) {
3474 		ptr = (void *) head - (unsigned long) func;
3475 	} else {
3476 		/*
3477 		 * Please note that the head-less variant has a
3478 		 * limitation, hence the clear rule for such objects:
3479 		 * they may be used only from might_sleep() context.
3480 		 * Everywhere else, embed an rcu_head in your data.
3482 		 */
3483 		might_sleep();
3484 		ptr = (unsigned long *) func;
3485 	}
3486 
3487 	krcp = krc_this_cpu_lock(&flags);
3488 
3489 	// Queue the object but don't yet schedule the batch.
3490 	if (debug_rcu_head_queue(ptr)) {
3491 		// Probable double kfree_rcu(), just leak.
3492 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3493 			  __func__, head);
3494 
3495 		// Mark as success and leave.
3496 		success = true;
3497 		goto unlock_return;
3498 	}
3499 
3500 	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3501 	if (!success) {
3502 		run_page_cache_worker(krcp);
3503 
3504 		if (head == NULL)
3505 			// Inline if kvfree_rcu(one_arg) call.
3506 			goto unlock_return;
3507 
3508 		head->func = func;
3509 		head->next = krcp->head;
3510 		krcp->head = head;
3511 		success = true;
3512 	}
3513 
3514 	WRITE_ONCE(krcp->count, krcp->count + 1);
3515 
3516 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3517 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3518 	    !krcp->monitor_todo) {
3519 		krcp->monitor_todo = true;
3520 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3521 	}
3522 
3523 unlock_return:
3524 	krc_this_cpu_unlock(krcp, flags);
3525 
3526 	/*
3527 	 * Fall back to an inline kvfree() after synchronize_rcu().
3528 	 * This is possible only from might_sleep() context, where
3529 	 * the current CPU can pass through a quiescent state.
3530 	 */
3531 	if (!success) {
3532 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3533 		synchronize_rcu();
3534 		kvfree(ptr);
3535 	}
3536 }
3537 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
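
/*
 * Editor's note: an illustrative sketch of the two kvfree_rcu() forms that
 * funnel into kvfree_call_rcu().  "struct foo", "fp", and "ptr" below are
 * hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	// Double-argument form: the rcu_head is embedded in the object,
 *	// so this may be called from atomic context.
 *	kvfree_rcu(fp, rcu);
 *
 *	// Single-argument (head-less) form: only valid in might_sleep()
 *	// context, because an allocation failure falls back to
 *	// synchronize_rcu() followed by an inline kvfree().
 *	kvfree_rcu(ptr);
 */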
3538 
3539 static unsigned long
3540 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3541 {
3542 	int cpu;
3543 	unsigned long count = 0;
3544 
3545 	/* Snapshot count of all CPUs */
3546 	for_each_possible_cpu(cpu) {
3547 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3548 
3549 		count += READ_ONCE(krcp->count);
3550 	}
3551 
3552 	return count;
3553 }
3554 
3555 static unsigned long
3556 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3557 {
3558 	int cpu, freed = 0;
3559 	unsigned long flags;
3560 
3561 	for_each_possible_cpu(cpu) {
3562 		int count;
3563 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3564 
3565 		count = krcp->count;
3566 		raw_spin_lock_irqsave(&krcp->lock, flags);
3567 		if (krcp->monitor_todo)
3568 			kfree_rcu_drain_unlock(krcp, flags);
3569 		else
3570 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3571 
3572 		sc->nr_to_scan -= count;
3573 		freed += count;
3574 
3575 		if (sc->nr_to_scan <= 0)
3576 			break;
3577 	}
3578 
3579 	return freed == 0 ? SHRINK_STOP : freed;
3580 }
3581 
3582 static struct shrinker kfree_rcu_shrinker = {
3583 	.count_objects = kfree_rcu_shrink_count,
3584 	.scan_objects = kfree_rcu_shrink_scan,
3585 	.batch = 0,
3586 	.seeks = DEFAULT_SEEKS,
3587 };
3588 
3589 void __init kfree_rcu_scheduler_running(void)
3590 {
3591 	int cpu;
3592 	unsigned long flags;
3593 
3594 	for_each_possible_cpu(cpu) {
3595 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3596 
3597 		raw_spin_lock_irqsave(&krcp->lock, flags);
3598 		if (!krcp->head || krcp->monitor_todo) {
3599 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3600 			continue;
3601 		}
3602 		krcp->monitor_todo = true;
3603 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3604 					 KFREE_DRAIN_JIFFIES);
3605 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3606 	}
3607 }
3608 
3609 /*
3610  * During early boot, any blocking grace-period wait automatically
3611  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3612  *
3613  * However, because a context switch is a grace period for !PREEMPTION, any
3614  * blocking grace-period wait automatically implies a grace period if
3615  * there is only one CPU online at any point in time during execution of
3616  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3617  * occasionally incorrectly indicate that there are multiple CPUs online
3618  * when there was in fact only one the whole time, as this just adds some
3619  * overhead: RCU still operates correctly.
3620  */
3621 static int rcu_blocking_is_gp(void)
3622 {
3623 	int ret;
3624 
3625 	if (IS_ENABLED(CONFIG_PREEMPTION))
3626 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3627 	might_sleep();  /* Check for RCU read-side critical section. */
3628 	preempt_disable();
3629 	ret = num_online_cpus() <= 1;
3630 	preempt_enable();
3631 	return ret;
3632 }
3633 
3634 /**
3635  * synchronize_rcu - wait until a grace period has elapsed.
3636  *
3637  * Control will return to the caller some time after a full grace
3638  * period has elapsed, in other words after all currently executing RCU
3639  * read-side critical sections have completed.  Note, however, that
3640  * upon return from synchronize_rcu(), the caller might well be executing
3641  * concurrently with new RCU read-side critical sections that began while
3642  * synchronize_rcu() was waiting.  RCU read-side critical sections are
3643  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3644  * In addition, regions of code across which interrupts, preemption, or
3645  * softirqs have been disabled also serve as RCU read-side critical
3646  * sections.  This includes hardware interrupt handlers, softirq handlers,
3647  * and NMI handlers.
3648  *
3649  * Note that this guarantee implies further memory-ordering guarantees.
3650  * On systems with more than one CPU, when synchronize_rcu() returns,
3651  * each CPU is guaranteed to have executed a full memory barrier since
3652  * the end of its last RCU read-side critical section whose beginning
3653  * preceded the call to synchronize_rcu().  In addition, each CPU having
3654  * an RCU read-side critical section that extends beyond the return from
3655  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3656  * after the beginning of synchronize_rcu() and before the beginning of
3657  * that RCU read-side critical section.  Note that these guarantees include
3658  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3659  * that are executing in the kernel.
3660  *
3661  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3662  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3663  * to have executed a full memory barrier during the execution of
3664  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3665  * again only if the system has more than one CPU).
3666  */
3667 void synchronize_rcu(void)
3668 {
3669 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3670 			 lock_is_held(&rcu_lock_map) ||
3671 			 lock_is_held(&rcu_sched_lock_map),
3672 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3673 	if (rcu_blocking_is_gp())
3674 		return;
3675 	if (rcu_gp_is_expedited())
3676 		synchronize_rcu_expedited();
3677 	else
3678 		wait_rcu_gp(call_rcu);
3679 }
3680 EXPORT_SYMBOL_GPL(synchronize_rcu);
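
/*
 * Editor's note: a minimal updater sketch using synchronize_rcu(); the
 * list, lock, and element names are hypothetical.
 *
 *	void foo_del(struct foo *fp)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&fp->list);	// Unlink from the RCU-protected list.
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();		// Wait out all pre-existing readers.
 *		kfree(fp);			// No reader can still hold a reference.
 *	}
 */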
3681 
3682 /**
3683  * get_state_synchronize_rcu - Snapshot current RCU state
3684  *
3685  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3686  * to determine whether or not a full grace period has elapsed in the
3687  * meantime.
3688  */
3689 unsigned long get_state_synchronize_rcu(void)
3690 {
3691 	/*
3692 	 * Any prior manipulation of RCU-protected data must happen
3693 	 * before the load from ->gp_seq.
3694 	 */
3695 	smp_mb();  /* ^^^ */
3696 	return rcu_seq_snap(&rcu_state.gp_seq);
3697 }
3698 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3699 
3700 /**
3701  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3702  *
3703  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3704  *
3705  * If a full RCU grace period has elapsed since the earlier call to
3706  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3707  * synchronize_rcu() to wait for a full grace period.
3708  *
3709  * Yes, this function does not take counter wrap into account.  But
3710  * counter wrap is harmless.  If the counter wraps, we have waited for
3711  * more than 2 billion grace periods (and way more on a 64-bit system!),
3712  * so waiting for one additional grace period should be just fine.
3713  */
3714 void cond_synchronize_rcu(unsigned long oldstate)
3715 {
3716 	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
3717 		synchronize_rcu();
3718 	else
3719 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3720 }
3721 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
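
/*
 * Editor's note: a sketch of the get_state_synchronize_rcu() /
 * cond_synchronize_rcu() cookie pattern; the helper named below is
 * hypothetical.
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_something_lengthy();		// A grace period may elapse here.
 *
 *	// Blocks in synchronize_rcu() only if a full grace period has
 *	// not already elapsed since the cookie was snapshotted.
 *	cond_synchronize_rcu(cookie);
 */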
3722 
3723 /*
3724  * Check to see if there is any immediate RCU-related work to be done by
3725  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3726  * in order of increasing expense: checks that can be carried out against
3727  * CPU-local state are performed first.  However, we must check for CPU
3728  * stalls first, else we might not get a chance.
3729  */
3730 static int rcu_pending(int user)
3731 {
3732 	bool gp_in_progress;
3733 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3734 	struct rcu_node *rnp = rdp->mynode;
3735 
3736 	lockdep_assert_irqs_disabled();
3737 
3738 	/* Check for CPU stalls, if enabled. */
3739 	check_cpu_stall(rdp);
3740 
3741 	/* Does this CPU need a deferred NOCB wakeup? */
3742 	if (rcu_nocb_need_deferred_wakeup(rdp))
3743 		return 1;
3744 
3745 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3746 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3747 		return 0;
3748 
3749 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3750 	gp_in_progress = rcu_gp_in_progress();
3751 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3752 		return 1;
3753 
3754 	/* Does this CPU have callbacks ready to invoke? */
3755 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
3756 		return 1;
3757 
3758 	/* Has RCU gone idle with this CPU needing another grace period? */
3759 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3760 	    (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
3761 	     !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
3762 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3763 		return 1;
3764 
3765 	/* Have RCU grace period completed or started?  */
3766 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3767 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3768 		return 1;
3769 
3770 	/* nothing to do */
3771 	return 0;
3772 }
3773 
3774 /*
3775  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3776  * the compiler is expected to optimize this away.
3777  */
3778 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3779 {
3780 	trace_rcu_barrier(rcu_state.name, s, cpu,
3781 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3782 }
3783 
3784 /*
3785  * RCU callback function for rcu_barrier().  If we are last, wake
3786  * up the task executing rcu_barrier().
3787  *
3788  * Note that the value of rcu_state.barrier_sequence must be captured
3789  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3790  * other CPUs might count the value down to zero before this CPU gets
3791  * around to invoking rcu_barrier_trace(), which might result in bogus
3792  * data from the next instance of rcu_barrier().
3793  */
3794 static void rcu_barrier_callback(struct rcu_head *rhp)
3795 {
3796 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3797 
3798 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3799 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3800 		complete(&rcu_state.barrier_completion);
3801 	} else {
3802 		rcu_barrier_trace(TPS("CB"), -1, s);
3803 	}
3804 }
3805 
3806 /*
3807  * Called with preemption disabled, and from cross-cpu IRQ context.
3808  */
3809 static void rcu_barrier_func(void *cpu_in)
3810 {
3811 	uintptr_t cpu = (uintptr_t)cpu_in;
3812 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3813 
3814 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3815 	rdp->barrier_head.func = rcu_barrier_callback;
3816 	debug_rcu_head_queue(&rdp->barrier_head);
3817 	rcu_nocb_lock(rdp);
3818 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3819 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3820 		atomic_inc(&rcu_state.barrier_cpu_count);
3821 	} else {
3822 		debug_rcu_head_unqueue(&rdp->barrier_head);
3823 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3824 				  rcu_state.barrier_sequence);
3825 	}
3826 	rcu_nocb_unlock(rdp);
3827 }
3828 
3829 /**
3830  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3831  *
3832  * Note that this primitive does not necessarily wait for an RCU grace period
3833  * to complete.  For example, if there are no RCU callbacks queued anywhere
3834  * in the system, then rcu_barrier() is within its rights to return
3835  * immediately, without waiting for anything, much less an RCU grace period.
3836  */
3837 void rcu_barrier(void)
3838 {
3839 	uintptr_t cpu;
3840 	struct rcu_data *rdp;
3841 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3842 
3843 	rcu_barrier_trace(TPS("Begin"), -1, s);
3844 
3845 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3846 	mutex_lock(&rcu_state.barrier_mutex);
3847 
3848 	/* Did someone else do our work for us? */
3849 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3850 		rcu_barrier_trace(TPS("EarlyExit"), -1,
3851 				  rcu_state.barrier_sequence);
3852 		smp_mb(); /* caller's subsequent code after above check. */
3853 		mutex_unlock(&rcu_state.barrier_mutex);
3854 		return;
3855 	}
3856 
3857 	/* Mark the start of the barrier operation. */
3858 	rcu_seq_start(&rcu_state.barrier_sequence);
3859 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3860 
3861 	/*
3862 	 * Initialize the count to two rather than to zero in order
3863 	 * to avoid a too-soon return to zero in case of an immediate
3864 	 * invocation of the just-enqueued callback (or preemption of
3865 	 * this task).  Exclude CPU-hotplug operations to ensure that no
3866 	 * offline non-offloaded CPU has callbacks queued.
3867 	 */
3868 	init_completion(&rcu_state.barrier_completion);
3869 	atomic_set(&rcu_state.barrier_cpu_count, 2);
3870 	get_online_cpus();
3871 
3872 	/*
3873 	 * Force each CPU with callbacks to register a new callback.
3874 	 * When that callback is invoked, we will know that all of the
3875 	 * corresponding CPU's preceding callbacks have been invoked.
3876 	 */
3877 	for_each_possible_cpu(cpu) {
3878 		rdp = per_cpu_ptr(&rcu_data, cpu);
3879 		if (cpu_is_offline(cpu) &&
3880 		    !rcu_segcblist_is_offloaded(&rdp->cblist))
3881 			continue;
3882 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3883 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
3884 					  rcu_state.barrier_sequence);
3885 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3886 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3887 			   cpu_is_offline(cpu)) {
3888 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3889 					  rcu_state.barrier_sequence);
3890 			local_irq_disable();
3891 			rcu_barrier_func((void *)cpu);
3892 			local_irq_enable();
3893 		} else if (cpu_is_offline(cpu)) {
3894 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3895 					  rcu_state.barrier_sequence);
3896 		} else {
3897 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3898 					  rcu_state.barrier_sequence);
3899 		}
3900 	}
3901 	put_online_cpus();
3902 
3903 	/*
3904 	 * Now that we have an rcu_barrier_callback() callback on each
3905 	 * CPU, and thus each counted, remove the initial count.
3906 	 */
3907 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3908 		complete(&rcu_state.barrier_completion);
3909 
3910 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3911 	wait_for_completion(&rcu_state.barrier_completion);
3912 
3913 	/* Mark the end of the barrier operation. */
3914 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3915 	rcu_seq_end(&rcu_state.barrier_sequence);
3916 
3917 	/* Other rcu_barrier() invocations can now safely proceed. */
3918 	mutex_unlock(&rcu_state.barrier_mutex);
3919 }
3920 EXPORT_SYMBOL_GPL(rcu_barrier);
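
/*
 * Editor's note: the canonical rcu_barrier() use case is module unload,
 * sketched below with hypothetical names.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();	// No new call_rcu() invocations.
 *		rcu_barrier();			// Wait for already-queued callbacks.
 *		// The module's callback functions can no longer be invoked,
 *		// so its text and data may now be safely unloaded.
 *	}
 */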
3921 
3922 /*
3923  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3924  * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
3925  * first CPU in a given leaf rcu_node structure coming online.  The caller
3926  * must hold the corresponding leaf rcu_node ->lock with interrupts
3927  */
3928 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3929 {
3930 	long mask;
3931 	long oldmask;
3932 	struct rcu_node *rnp = rnp_leaf;
3933 
3934 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
3935 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
3936 	for (;;) {
3937 		mask = rnp->grpmask;
3938 		rnp = rnp->parent;
3939 		if (rnp == NULL)
3940 			return;
3941 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3942 		oldmask = rnp->qsmaskinit;
3943 		rnp->qsmaskinit |= mask;
3944 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3945 		if (oldmask)
3946 			return;
3947 	}
3948 }
3949 
3950 /*
3951  * Do boot-time initialization of a CPU's per-CPU RCU data.
3952  */
3953 static void __init
3954 rcu_boot_init_percpu_data(int cpu)
3955 {
3956 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3957 
3958 	/* Set up local state, ensuring consistent view of global state. */
3959 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3960 	INIT_WORK(&rdp->strict_work, strict_work_handler);
3961 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
3962 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
3963 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3964 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3965 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3966 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3967 	rdp->cpu = cpu;
3968 	rcu_boot_init_nocb_percpu_data(rdp);
3969 }
3970 
3971 /*
3972  * Invoked early in the CPU-online process, when pretty much all services
3973  * are available.  The incoming CPU is not present.
3974  *
3975  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
3976  * offline event can be happening at a given time.  Note also that we can
3977  * accept some slop in the rsp->gp_seq access due to the fact that this
3978  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3979  * And any offloaded callbacks are being numbered elsewhere.
3980  */
3981 int rcutree_prepare_cpu(unsigned int cpu)
3982 {
3983 	unsigned long flags;
3984 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3985 	struct rcu_node *rnp = rcu_get_root();
3986 
3987 	/* Set up local state, ensuring consistent view of global state. */
3988 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
3989 	rdp->qlen_last_fqs_check = 0;
3990 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
3991 	rdp->blimit = blimit;
3992 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
3993 	    !rcu_segcblist_is_offloaded(&rdp->cblist))
3994 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
3995 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
3996 	rcu_dynticks_eqs_online();
3997 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
3998 
3999 	/*
4000 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4001 	 * propagation up the rcu_node tree will happen at the beginning
4002 	 * of the next grace period.
4003 	 */
4004 	rnp = rdp->mynode;
4005 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4006 	rdp->beenonline = true;	 /* We have now been online. */
4007 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4008 	rdp->gp_seq_needed = rdp->gp_seq;
4009 	rdp->cpu_no_qs.b.norm = true;
4010 	rdp->core_needs_qs = false;
4011 	rdp->rcu_iw_pending = false;
4012 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4013 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4014 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4015 	rcu_prepare_kthreads(cpu);
4016 	rcu_spawn_cpu_nocb_kthread(cpu);
4017 
4018 	return 0;
4019 }
4020 
4021 /*
4022  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4023  */
4024 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4025 {
4026 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4027 
4028 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4029 }
4030 
4031 /*
4032  * Near the end of the CPU-online process.  Pretty much all services
4033  * enabled, and the CPU is now very much alive.
4034  */
4035 int rcutree_online_cpu(unsigned int cpu)
4036 {
4037 	unsigned long flags;
4038 	struct rcu_data *rdp;
4039 	struct rcu_node *rnp;
4040 
4041 	rdp = per_cpu_ptr(&rcu_data, cpu);
4042 	rnp = rdp->mynode;
4043 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4044 	rnp->ffmask |= rdp->grpmask;
4045 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4046 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4047 		return 0; /* Too early in boot for scheduler work. */
4048 	sync_sched_exp_online_cleanup(cpu);
4049 	rcutree_affinity_setting(cpu, -1);
4050 
4051 	// Stop-machine done, so allow nohz_full to disable tick.
4052 	tick_dep_clear(TICK_DEP_BIT_RCU);
4053 	return 0;
4054 }
4055 
4056 /*
4057  * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4058  * with pretty much all services enabled.
4059  */
4060 int rcutree_offline_cpu(unsigned int cpu)
4061 {
4062 	unsigned long flags;
4063 	struct rcu_data *rdp;
4064 	struct rcu_node *rnp;
4065 
4066 	rdp = per_cpu_ptr(&rcu_data, cpu);
4067 	rnp = rdp->mynode;
4068 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4069 	rnp->ffmask &= ~rdp->grpmask;
4070 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4071 
4072 	rcutree_affinity_setting(cpu, cpu);
4073 
4074 	// nohz_full CPUs need the tick for stop-machine to work quickly
4075 	tick_dep_set(TICK_DEP_BIT_RCU);
4076 	return 0;
4077 }
4078 
4079 /*
4080  * Mark the specified CPU as being online so that subsequent grace periods
4081  * (both expedited and normal) will wait on it.  Note that this means that
4082  * incoming CPUs are not allowed to use RCU read-side critical sections
4083  * until this function is called.  Failing to observe this restriction
4084  * will result in lockdep splats.
4085  *
4086  * Note that this function is special in that it is invoked directly
4087  * from the incoming CPU rather than from the cpuhp_step mechanism.
4088  * This is because this function must be invoked at a precise location.
4089  */
4090 void rcu_cpu_starting(unsigned int cpu)
4091 {
4092 	unsigned long flags;
4093 	unsigned long mask;
4094 	struct rcu_data *rdp;
4095 	struct rcu_node *rnp;
4096 	bool newcpu;
4097 
4098 	rdp = per_cpu_ptr(&rcu_data, cpu);
4099 	if (rdp->cpu_started)
4100 		return;
4101 	rdp->cpu_started = true;
4102 
4103 	rnp = rdp->mynode;
4104 	mask = rdp->grpmask;
4105 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4106 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4107 	newcpu = !(rnp->expmaskinitnext & mask);
4108 	rnp->expmaskinitnext |= mask;
4109 	/* Allow lockless access for expedited grace periods. */
4110 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4111 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4112 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4113 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4114 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4115 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
4116 		rcu_disable_urgency_upon_qs(rdp);
4117 		/* Report QS -after- changing ->qsmaskinitnext! */
4118 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4119 	} else {
4120 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4121 	}
4122 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4123 }
4124 
4125 /*
4126  * The outgoing CPU has no further need of RCU, so remove it from
4127  * the rcu_node tree's ->qsmaskinitnext bit masks.
4128  *
4129  * Note that this function is special in that it is invoked directly
4130  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4131  * This is because this function must be invoked at a precise location.
4132  */
4133 void rcu_report_dead(unsigned int cpu)
4134 {
4135 	unsigned long flags;
4136 	unsigned long mask;
4137 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4138 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4139 
4140 	/* QS for any half-done expedited grace period. */
4141 	preempt_disable();
4142 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4143 	preempt_enable();
4144 	rcu_preempt_deferred_qs(current);
4145 
4146 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4147 	mask = rdp->grpmask;
4148 	raw_spin_lock(&rcu_state.ofl_lock);
4149 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4150 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4151 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4152 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4153 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4154 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4155 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4156 	}
4157 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4158 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4159 	raw_spin_unlock(&rcu_state.ofl_lock);
4160 
4161 	rdp->cpu_started = false;
4162 }
4163 
4164 #ifdef CONFIG_HOTPLUG_CPU
4165 /*
4166  * The outgoing CPU has just passed through the dying-idle state, and we
4167  * are being invoked from the CPU that was IPIed to continue the offline
4168  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4169  */
4170 void rcutree_migrate_callbacks(int cpu)
4171 {
4172 	unsigned long flags;
4173 	struct rcu_data *my_rdp;
4174 	struct rcu_node *my_rnp;
4175 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4176 	bool needwake;
4177 
4178 	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4179 	    rcu_segcblist_empty(&rdp->cblist))
4180 		return;  /* No callbacks to migrate. */
4181 
4182 	local_irq_save(flags);
4183 	my_rdp = this_cpu_ptr(&rcu_data);
4184 	my_rnp = my_rdp->mynode;
4185 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4186 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4187 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4188 	/* Leverage recent GPs and set GP for new callbacks. */
4189 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4190 		   rcu_advance_cbs(my_rnp, my_rdp);
4191 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4192 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4193 	rcu_segcblist_disable(&rdp->cblist);
4194 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4195 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4196 	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4197 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4198 		__call_rcu_nocb_wake(my_rdp, true, flags);
4199 	} else {
4200 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4201 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4202 	}
4203 	if (needwake)
4204 		rcu_gp_kthread_wake();
4205 	lockdep_assert_irqs_enabled();
4206 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4207 		  !rcu_segcblist_empty(&rdp->cblist),
4208 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4209 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4210 		  rcu_segcblist_first_cb(&rdp->cblist));
4211 }
4212 #endif
4213 
4214 /*
4215  * On non-huge systems, use expedited RCU grace periods to make suspend
4216  * and hibernation run faster.
4217  */
4218 static int rcu_pm_notify(struct notifier_block *self,
4219 			 unsigned long action, void *hcpu)
4220 {
4221 	switch (action) {
4222 	case PM_HIBERNATION_PREPARE:
4223 	case PM_SUSPEND_PREPARE:
4224 		rcu_expedite_gp();
4225 		break;
4226 	case PM_POST_HIBERNATION:
4227 	case PM_POST_SUSPEND:
4228 		rcu_unexpedite_gp();
4229 		break;
4230 	default:
4231 		break;
4232 	}
4233 	return NOTIFY_OK;
4234 }
4235 
4236 /*
4237  * Spawn the kthreads that handle RCU's grace periods.
4238  */
4239 static int __init rcu_spawn_gp_kthread(void)
4240 {
4241 	unsigned long flags;
4242 	int kthread_prio_in = kthread_prio;
4243 	struct rcu_node *rnp;
4244 	struct sched_param sp;
4245 	struct task_struct *t;
4246 
4247 	/* Force priority into range. */
4248 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4249 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4250 		kthread_prio = 2;
4251 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4252 		kthread_prio = 1;
4253 	else if (kthread_prio < 0)
4254 		kthread_prio = 0;
4255 	else if (kthread_prio > 99)
4256 		kthread_prio = 99;
4257 
4258 	if (kthread_prio != kthread_prio_in)
4259 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4260 			 kthread_prio, kthread_prio_in);
4261 
4262 	rcu_scheduler_fully_active = 1;
4263 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4264 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4265 		return 0;
4266 	if (kthread_prio) {
4267 		sp.sched_priority = kthread_prio;
4268 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4269 	}
4270 	rnp = rcu_get_root();
4271 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4272 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4273 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4274 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4275 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4276 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4277 	wake_up_process(t);
4278 	rcu_spawn_nocb_kthreads();
4279 	rcu_spawn_boost_kthreads();
4280 	rcu_spawn_core_kthreads();
4281 	return 0;
4282 }
4283 early_initcall(rcu_spawn_gp_kthread);
4284 
4285 /*
4286  * This function is invoked towards the end of the scheduler's
4287  * initialization process.  Before this is called, the idle task might
4288  * contain synchronous grace-period primitives (during which time, this idle
4289  * task is booting the system, and such primitives are no-ops).  After this
4290  * function is called, any synchronous grace-period primitives are run as
4291  * expedited, with the requesting task driving the grace period forward.
4292  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4293  * runtime RCU functionality.
4294  */
4295 void rcu_scheduler_starting(void)
4296 {
4297 	WARN_ON(num_online_cpus() != 1);
4298 	WARN_ON(nr_context_switches() > 0);
4299 	rcu_test_sync_prims();
4300 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4301 	rcu_test_sync_prims();
4302 }
4303 
4304 /*
4305  * Helper function for rcu_init() that initializes the rcu_state structure.
4306  */
4307 static void __init rcu_init_one(void)
4308 {
4309 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4310 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4311 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4312 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4313 
4314 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4315 	int cpustride = 1;
4316 	int i;
4317 	int j;
4318 	struct rcu_node *rnp;
4319 
4320 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4321 
4322 	/* Silence gcc 4.8 false positive about array index out of range. */
4323 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4324 		panic("rcu_init_one: rcu_num_lvls out of range");
4325 
4326 	/* Initialize the level-tracking arrays. */
4327 
4328 	for (i = 1; i < rcu_num_lvls; i++)
4329 		rcu_state.level[i] =
4330 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4331 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4332 
4333 	/* Initialize the elements themselves, starting from the leaves. */
4334 
4335 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4336 		cpustride *= levelspread[i];
4337 		rnp = rcu_state.level[i];
4338 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4339 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4340 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4341 						   &rcu_node_class[i], buf[i]);
4342 			raw_spin_lock_init(&rnp->fqslock);
4343 			lockdep_set_class_and_name(&rnp->fqslock,
4344 						   &rcu_fqs_class[i], fqs[i]);
4345 			rnp->gp_seq = rcu_state.gp_seq;
4346 			rnp->gp_seq_needed = rcu_state.gp_seq;
4347 			rnp->completedqs = rcu_state.gp_seq;
4348 			rnp->qsmask = 0;
4349 			rnp->qsmaskinit = 0;
4350 			rnp->grplo = j * cpustride;
4351 			rnp->grphi = (j + 1) * cpustride - 1;
4352 			if (rnp->grphi >= nr_cpu_ids)
4353 				rnp->grphi = nr_cpu_ids - 1;
4354 			if (i == 0) {
4355 				rnp->grpnum = 0;
4356 				rnp->grpmask = 0;
4357 				rnp->parent = NULL;
4358 			} else {
4359 				rnp->grpnum = j % levelspread[i - 1];
4360 				rnp->grpmask = BIT(rnp->grpnum);
4361 				rnp->parent = rcu_state.level[i - 1] +
4362 					      j / levelspread[i - 1];
4363 			}
4364 			rnp->level = i;
4365 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4366 			rcu_init_one_nocb(rnp);
4367 			init_waitqueue_head(&rnp->exp_wq[0]);
4368 			init_waitqueue_head(&rnp->exp_wq[1]);
4369 			init_waitqueue_head(&rnp->exp_wq[2]);
4370 			init_waitqueue_head(&rnp->exp_wq[3]);
4371 			spin_lock_init(&rnp->exp_lock);
4372 		}
4373 	}
4374 
4375 	init_swait_queue_head(&rcu_state.gp_wq);
4376 	init_swait_queue_head(&rcu_state.expedited_wq);
4377 	rnp = rcu_first_leaf_node();
4378 	for_each_possible_cpu(i) {
4379 		while (i > rnp->grphi)
4380 			rnp++;
4381 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4382 		rcu_boot_init_percpu_data(i);
4383 	}
4384 }
4385 
4386 /*
4387  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4388  * replace the definitions in tree.h because those are needed to size
4389  * the ->node array in the rcu_state structure.
4390  */
4391 void rcu_init_geometry(void)
4392 {
4393 	ulong d;
4394 	int i;
4395 	static unsigned long old_nr_cpu_ids;
4396 	int rcu_capacity[RCU_NUM_LVLS];
4397 	static bool initialized;
4398 
4399 	if (initialized) {
4400 		/*
4401 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4402 		 * unless nr_cpus_ids == NR_CPUS, in which case who cares?
4403 		 */
4404 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4405 		return;
4406 	}
4407 
4408 	old_nr_cpu_ids = nr_cpu_ids;
4409 	initialized = true;
4410 
4411 	/*
4412 	 * Initialize any unspecified boot parameters.
4413 	 * The default values of jiffies_till_first_fqs and
4414 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4415 	 * value, which is a function of HZ, then adding one for each
4416 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4417 	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute the number of CPUs that can be handled by an rcu_node
	 * tree with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
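	/*
	 * For example, assuming the common 64-bit defaults of
	 * rcu_fanout_leaf == 16 and RCU_FANOUT == 64, this yields
	 * rcu_capacity[] == { 16, 1024, 65536, ... }: 16 CPUs for a
	 * single-level tree, 1024 for two levels, and so on.
	 */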

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}
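	/*
	 * Continuing the hypothetical example above: nr_cpu_ids == 80 with
	 * 16-CPU leaves needs two levels (80 > 16 but 80 <= 1024), so
	 * num_rcu_lvl[] == { DIV_ROUND_UP(80, 1024), DIV_ROUND_UP(80, 16) }
	 * == { 1, 5 }: one root rcu_node fanning out to five leaves.
	 */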

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
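/*
 * As a rough, hypothetical illustration of the format produced below, a
 * two-level tree covering CPUs 0-79 with 16-CPU leaves might print:
 *
 *   rcu_node tree layout dump
 *    0:79 ^0
 *    0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4
 *
 * that is, "grplo:grphi ^grpnum" for each rcu_node, one line per level.
 */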
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

struct workqueue_struct *rcu_gp_wq;
struct workqueue_struct *rcu_par_gp_wq;

static void __init kfree_rcu_batch_init(void)
{
	int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		for (i = 0; i < KFREE_N_BATCHES; i++) {
			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
			krcp->krw_arr[i].krcp = krcp;
		}

		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
		INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
		krcp->initialized = true;
	}
	if (register_shrinker(&kfree_rcu_shrinker))
		pr_err("Failed to register kfree_rcu() shrinker!\n");
}

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	kfree_rcu_batch_init();
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}

	/* Create workqueues for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_par_gp_wq);
	srcu_init();

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
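	/*
	 * Illustrative arithmetic, assuming the usual defaults of
	 * DEFAULT_RCU_QOVLD_MULT == 2 and qhimark == 10000: qovld_calc
	 * ends up at 20000, the per-CPU callback count treated as
	 * overloaded by the quiescent-state-forcing code.
	 */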
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;
}

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"