1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/panic.h>
37 #include <linux/panic_notifier.h>
38 #include <linux/percpu.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/mutex.h>
42 #include <linux/time.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/wait.h>
45 #include <linux/kthread.h>
46 #include <uapi/linux/sched/types.h>
47 #include <linux/prefetch.h>
48 #include <linux/delay.h>
49 #include <linux/random.h>
50 #include <linux/trace_events.h>
51 #include <linux/suspend.h>
52 #include <linux/ftrace.h>
53 #include <linux/tick.h>
54 #include <linux/sysrq.h>
55 #include <linux/kprobes.h>
56 #include <linux/gfp.h>
57 #include <linux/oom.h>
58 #include <linux/smpboot.h>
59 #include <linux/jiffies.h>
60 #include <linux/slab.h>
61 #include <linux/sched/isolation.h>
62 #include <linux/sched/clock.h>
63 #include <linux/vmalloc.h>
64 #include <linux/mm.h>
65 #include <linux/kasan.h>
66 #include <linux/context_tracking.h>
67 #include "../time/tick-internal.h"
68 
69 #include "tree.h"
70 #include "rcu.h"
71 
72 #ifdef MODULE_PARAM_PREFIX
73 #undef MODULE_PARAM_PREFIX
74 #endif
75 #define MODULE_PARAM_PREFIX "rcutree."
76 
77 /* Data structures. */
78 
79 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
80 	.gpwrap = true,
81 #ifdef CONFIG_RCU_NOCB_CPU
82 	.cblist.flags = SEGCBLIST_RCU_CORE,
83 #endif
84 };
85 static struct rcu_state rcu_state = {
86 	.level = { &rcu_state.node[0] },
87 	.gp_state = RCU_GP_IDLE,
88 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
89 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
90 	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
91 	.name = RCU_NAME,
92 	.abbr = RCU_ABBR,
93 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
94 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
95 	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
96 };
97 
98 /* Dump rcu_node combining tree at boot to verify correct setup. */
99 static bool dump_tree;
100 module_param(dump_tree, bool, 0444);
101 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
102 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
103 #ifndef CONFIG_PREEMPT_RT
104 module_param(use_softirq, bool, 0444);
105 #endif
106 /* Control rcu_node-tree auto-balancing at boot time. */
107 static bool rcu_fanout_exact;
108 module_param(rcu_fanout_exact, bool, 0444);
109 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
110 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
111 module_param(rcu_fanout_leaf, int, 0444);
112 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
113 /* Number of rcu_nodes at specified level. */
114 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
115 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
116 
117 /*
118  * The rcu_scheduler_active variable is initialized to the value
119  * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
120  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
121  * RCU can assume that there is but one task, allowing RCU to (for example)
122  * optimize synchronize_rcu() to a simple barrier().  When this variable
123  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
124  * to detect real grace periods.  This variable is also used to suppress
125  * boot-time false positives from lockdep-RCU error checking.  Finally, it
126  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
127  * is fully initialized, including all of its kthreads having been spawned.
128  */
129 int rcu_scheduler_active __read_mostly;
130 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
131 
132 /*
133  * The rcu_scheduler_fully_active variable transitions from zero to one
134  * during the early_initcall() processing, which is after the scheduler
135  * is capable of creating new tasks.  So RCU processing (for example,
136  * creating tasks for RCU priority boosting) must be delayed until after
137  * rcu_scheduler_fully_active transitions from zero to one.  We also
138  * currently delay invocation of any RCU callbacks until after this point.
139  *
140  * It might later prove better for people registering RCU callbacks during
141  * early boot to take responsibility for these callbacks, but one step at
142  * a time.
143  */
144 static int rcu_scheduler_fully_active __read_mostly;
145 
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
147 			      unsigned long gps, unsigned long flags);
148 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
150 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
151 static void invoke_rcu_core(void);
152 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void sync_sched_exp_online_cleanup(int cpu);
154 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
155 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
156 
157 /*
158  * rcuc/rcub/rcuop kthread real-time priority. Whether the "rcuop"
159  * kthreads actually run at this real-time priority is controlled
160  * separately by the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
161  */
162 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
163 module_param(kthread_prio, int, 0444);
164 
165 /* Delay in jiffies for grace-period initialization delays, debug only. */
166 
167 static int gp_preinit_delay;
168 module_param(gp_preinit_delay, int, 0444);
169 static int gp_init_delay;
170 module_param(gp_init_delay, int, 0444);
171 static int gp_cleanup_delay;
172 module_param(gp_cleanup_delay, int, 0444);
173 
174 // Add delay to rcu_read_unlock() for strict grace periods.
175 static int rcu_unlock_delay;
176 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
177 module_param(rcu_unlock_delay, int, 0444);
178 #endif
179 
180 /*
181  * This RCU parameter is read-only at runtime. It sets the minimum
182  * number of objects that can be cached per CPU, with each object
183  * being one page in size. The value can be changed only at boot
184  * time.
185  */
186 static int rcu_min_cached_objs = 5;
187 module_param(rcu_min_cached_objs, int, 0444);
188 
189 // A page shrinker can ask for pages to be freed to make them
190 // available for other parts of the system. This usually happens
191 // under low memory conditions, and in that case we should also
192 // defer page-cache filling for a short time period.
193 //
194 // The default value is 5 seconds, which is long enough to reduce
195 // interference with the shrinker while it asks other systems to
196 // drain their caches.
197 static int rcu_delay_page_cache_fill_msec = 5000;
198 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
199 
200 /* Retrieve RCU kthreads priority for rcutorture */
201 int rcu_get_gp_kthreads_prio(void)
202 {
203 	return kthread_prio;
204 }
205 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
206 
207 /*
208  * Number of grace periods between delays, normalized by the duration of
209  * the delay.  The longer the delay, the more the grace periods between
210  * each delay.  The reason for this normalization is that it means that,
211  * for non-zero delays, the overall slowdown of grace periods is constant
212  * regardless of the duration of the delay.  This arrangement balances
213  * the need for long delays to increase some race probabilities with the
214  * need for fast grace periods to increase other race probabilities.
215  */
216 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
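
/*
 * Illustrative arithmetic (editorial note, not in the original source):
 * rcu_gp_slow() below takes one delay every
 * rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace periods.  With, say,
 * rcu_num_nodes = 5 and a debug delay of 10 jiffies, one 10-jiffy pause
 * occurs every 5 * 3 * 10 = 150 grace periods, an average added cost of
 * 10/150 jiffy per grace period.  Doubling the delay to 20 jiffies halves
 * its frequency, so the average cost stays at
 * 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD) jiffies per grace period,
 * which is the constant overall slowdown described above.
 */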
217 
218 /*
219  * Compute the mask of online CPUs for the specified rcu_node structure.
220  * This will not be stable unless the rcu_node structure's ->lock is
221  * held, but the bit corresponding to the current CPU will be stable
222  * in most contexts.
223  */
224 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
225 {
226 	return READ_ONCE(rnp->qsmaskinitnext);
227 }
228 
229 /*
230  * Is the CPU corresponding to the specified rcu_data structure online
231  * from RCU's perspective?  This perspective is given by that structure's
232  * ->qsmaskinitnext field rather than by the global cpu_online_mask.
233  */
234 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
235 {
236 	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
237 }
238 
239 /*
240  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
241  * permit this function to be invoked without holding the root rcu_node
242  * structure's ->lock, but of course results can be subject to change.
243  */
244 static int rcu_gp_in_progress(void)
245 {
246 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
247 }
248 
249 /*
250  * Return the number of callbacks queued on the specified CPU.
251  * Handles both the nocbs and normal cases.
252  */
253 static long rcu_get_n_cbs_cpu(int cpu)
254 {
255 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
256 
257 	if (rcu_segcblist_is_enabled(&rdp->cblist))
258 		return rcu_segcblist_n_cbs(&rdp->cblist);
259 	return 0;
260 }
261 
262 void rcu_softirq_qs(void)
263 {
264 	rcu_qs();
265 	rcu_preempt_deferred_qs(current);
266 	rcu_tasks_qs(current, false);
267 }
268 
269 /*
270  * Reset the current CPU's ->dynticks counter to indicate that the
271  * newly onlined CPU is no longer in an extended quiescent state.
272  * This will either leave the counter unchanged, or increment it
273  * to the next non-quiescent value.
274  *
275  * The non-atomic test/increment sequence works because the upper bits
276  * of the ->dynticks counter are manipulated only by the corresponding CPU,
277  * or when the corresponding CPU is offline.
278  */
279 static void rcu_dynticks_eqs_online(void)
280 {
281 	if (ct_dynticks() & RCU_DYNTICKS_IDX)
282 		return;
283 	ct_state_inc(RCU_DYNTICKS_IDX);
284 }
285 
286 /*
287  * Snapshot the ->dynticks counter with full ordering so as to allow
288  * stable comparison of this counter with past and future snapshots.
289  */
290 static int rcu_dynticks_snap(int cpu)
291 {
292 	smp_mb();  // Fundamental RCU ordering guarantee.
293 	return ct_dynticks_cpu_acquire(cpu);
294 }
295 
296 /*
297  * Return true if the snapshot returned from rcu_dynticks_snap()
298  * indicates that RCU is in an extended quiescent state.
299  */
300 static bool rcu_dynticks_in_eqs(int snap)
301 {
302 	return !(snap & RCU_DYNTICKS_IDX);
303 }
304 
305 /* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
306 bool rcu_is_idle_cpu(int cpu)
307 {
308 	return rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
309 }
310 
311 /*
312  * Return true if the CPU corresponding to the specified rcu_data
313  * structure has spent some time in an extended quiescent state since
314  * rcu_dynticks_snap() returned the specified snapshot.
315  */
316 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
317 {
318 	return snap != rcu_dynticks_snap(rdp->cpu);
319 }
320 
321 /*
322  * Return true if the referenced integer is zero while the specified
323  * CPU remains within a single extended quiescent state.
324  */
325 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
326 {
327 	int snap;
328 
329 	// If not quiescent, force back to earlier extended quiescent state.
330 	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
331 	smp_rmb(); // Order ->dynticks and *vp reads.
332 	if (READ_ONCE(*vp))
333 		return false;  // Non-zero, so report failure;
334 	smp_rmb(); // Order *vp read and ->dynticks re-read.
335 
336 	// If still in the same extended quiescent state, we are good!
337 	return snap == ct_dynticks_cpu(cpu);
338 }
339 
340 /*
341  * Let the RCU core know that this CPU has gone through the scheduler,
342  * which is a quiescent state.  This is called when the need for a
343  * quiescent state is urgent, so we burn an atomic operation and full
344  * memory barriers to let the RCU core know about it, regardless of what
345  * this CPU might (or might not) do in the near future.
346  *
347  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
348  *
349  * The caller must have disabled interrupts and must not be idle.
350  */
351 notrace void rcu_momentary_dyntick_idle(void)
352 {
353 	int seq;
354 
355 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
356 	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
357 	/* It is illegal to call this from idle state. */
358 	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
359 	rcu_preempt_deferred_qs(current);
360 }
361 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
362 
363 /**
364  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
365  *
366  * Return true if the current CPU is idle and running either at a
367  * first-level (not nested) interrupt from idle or directly from idle.
368  *
369  * The caller must have at least disabled IRQs.
370  */
371 static int rcu_is_cpu_rrupt_from_idle(void)
372 {
373 	long nesting;
374 
375 	/*
376 	 * Usually called from the tick; but also used from smp_call_function_single()
377 	 * for expedited grace periods. This latter can result in running from
378 	 * the idle task, instead of an actual IPI.
379 	 */
380 	lockdep_assert_irqs_disabled();
381 
382 	/* Check for counter underflows */
383 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
384 			 "RCU dynticks_nesting counter underflow!");
385 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
386 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
387 
388 	/* Are we at first interrupt nesting level? */
389 	nesting = ct_dynticks_nmi_nesting();
390 	if (nesting > 1)
391 		return false;
392 
393 	/*
394 	 * If we're not in an interrupt, we must be in the idle task!
395 	 */
396 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
397 
398 	/* Does CPU appear to be idle from an RCU standpoint? */
399 	return ct_dynticks_nesting() == 0;
400 }
401 
402 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
403 				// Maximum callbacks per rcu_do_batch ...
404 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
405 static long blimit = DEFAULT_RCU_BLIMIT;
406 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
407 static long qhimark = DEFAULT_RCU_QHIMARK;
408 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
409 static long qlowmark = DEFAULT_RCU_QLOMARK;
410 #define DEFAULT_RCU_QOVLD_MULT 2
411 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
412 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
413 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
414 
415 module_param(blimit, long, 0444);
416 module_param(qhimark, long, 0444);
417 module_param(qlowmark, long, 0444);
418 module_param(qovld, long, 0444);
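
/*
 * Editorial note (not part of the original source): with the default
 * values above, callback processing behaves roughly as follows.  A CPU
 * with fewer than qlowmark (100) pending callbacks invokes at most
 * blimit (10, or 1000 for CONFIG_RCU_STRICT_GRACE_PERIOD=y) callbacks
 * per rcu_do_batch() pass.  Once the backlog exceeds qhimark (10000),
 * blimit is ignored so the CPU can dig itself out, and only when the
 * backlog drops back to qlowmark does the blimit cap apply again.  At
 * qovld (2 * qhimark = 20000) pending callbacks, RCU additionally starts
 * hammering the CPU for quiescent states.  Given the MODULE_PARAM_PREFIX
 * of "rcutree." defined above, these can be tuned at boot time, for
 * example with rcutree.qhimark=20000 on the kernel command line.
 */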
419 
420 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
421 static ulong jiffies_till_next_fqs = ULONG_MAX;
422 static bool rcu_kick_kthreads;
423 static int rcu_divisor = 7;
424 module_param(rcu_divisor, int, 0644);
425 
426 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
427 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
428 module_param(rcu_resched_ns, long, 0644);
429 
430 /*
431  * How long the grace period must be before we start recruiting
432  * quiescent-state help from rcu_note_context_switch().
433  */
434 static ulong jiffies_till_sched_qs = ULONG_MAX;
435 module_param(jiffies_till_sched_qs, ulong, 0444);
436 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
437 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
438 
439 /*
440  * Make sure that we give the grace-period kthread time to detect any
441  * idle CPUs before taking active measures to force quiescent states.
442  * However, don't go below 100 milliseconds, adjusted upwards for really
443  * large systems.
444  */
445 static void adjust_jiffies_till_sched_qs(void)
446 {
447 	unsigned long j;
448 
449 	/* If jiffies_till_sched_qs was specified, respect the request. */
450 	if (jiffies_till_sched_qs != ULONG_MAX) {
451 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
452 		return;
453 	}
454 	/* Otherwise, set to third fqs scan, but bound below on large system. */
455 	j = READ_ONCE(jiffies_till_first_fqs) +
456 		      2 * READ_ONCE(jiffies_till_next_fqs);
457 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
458 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
459 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
460 	WRITE_ONCE(jiffies_to_sched_qs, j);
461 }
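
/*
 * Worked example (editorial illustration, not in the original source):
 * suppose jiffies_till_sched_qs was left at its ULONG_MAX default,
 * jiffies_till_first_fqs is 3, and jiffies_till_next_fqs is 3.  Then
 * j = 3 + 2 * 3 = 9 jiffies, which on an HZ=1000 system with a modest
 * CPU count is below the HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV
 * floor of roughly 100 jiffies, so jiffies_to_sched_qs is set to that
 * floor rather than to 9.
 */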
462 
463 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
464 {
465 	ulong j;
466 	int ret = kstrtoul(val, 0, &j);
467 
468 	if (!ret) {
469 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
470 		adjust_jiffies_till_sched_qs();
471 	}
472 	return ret;
473 }
474 
475 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
476 {
477 	ulong j;
478 	int ret = kstrtoul(val, 0, &j);
479 
480 	if (!ret) {
481 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
482 		adjust_jiffies_till_sched_qs();
483 	}
484 	return ret;
485 }
486 
487 static const struct kernel_param_ops first_fqs_jiffies_ops = {
488 	.set = param_set_first_fqs_jiffies,
489 	.get = param_get_ulong,
490 };
491 
492 static const struct kernel_param_ops next_fqs_jiffies_ops = {
493 	.set = param_set_next_fqs_jiffies,
494 	.get = param_get_ulong,
495 };
496 
497 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
498 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
499 module_param(rcu_kick_kthreads, bool, 0644);
500 
501 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
502 static int rcu_pending(int user);
503 
504 /*
505  * Return the number of RCU GPs completed thus far for debug & stats.
506  */
507 unsigned long rcu_get_gp_seq(void)
508 {
509 	return READ_ONCE(rcu_state.gp_seq);
510 }
511 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
512 
513 /*
514  * Return the number of RCU expedited batches completed thus far for
515  * debug & stats.  Odd numbers mean that a batch is in progress, even
516  * numbers mean idle.  The value returned will thus be roughly double
517  * the cumulative batches since boot.
518  */
519 unsigned long rcu_exp_batches_completed(void)
520 {
521 	return rcu_state.expedited_sequence;
522 }
523 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
524 
525 /*
526  * Return the root node of the rcu_state structure.
527  */
528 static struct rcu_node *rcu_get_root(void)
529 {
530 	return &rcu_state.node[0];
531 }
532 
533 /*
534  * Send along grace-period-related data for rcutorture diagnostics.
535  */
536 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
537 			    unsigned long *gp_seq)
538 {
539 	switch (test_type) {
540 	case RCU_FLAVOR:
541 		*flags = READ_ONCE(rcu_state.gp_flags);
542 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
543 		break;
544 	default:
545 		break;
546 	}
547 }
548 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
549 
550 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
551 /*
552  * An empty function that will trigger a reschedule on
553  * IRQ tail once IRQs get re-enabled on userspace/guest resume.
554  */
555 static void late_wakeup_func(struct irq_work *work)
556 {
557 }
558 
559 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
560 	IRQ_WORK_INIT(late_wakeup_func);
561 
562 /*
563  * If either:
564  *
565  * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
566  * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
567  *
568  * In these cases the late RCU wake ups aren't supported in the resched loops and our
569  * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
570  * get re-enabled again.
571  */
572 noinstr void rcu_irq_work_resched(void)
573 {
574 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
575 
576 	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
577 		return;
578 
579 	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
580 		return;
581 
582 	instrumentation_begin();
583 	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
584 		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
585 	}
586 	instrumentation_end();
587 }
588 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
589 
590 #ifdef CONFIG_PROVE_RCU
591 /**
592  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
593  */
594 void rcu_irq_exit_check_preempt(void)
595 {
596 	lockdep_assert_irqs_disabled();
597 
598 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
599 			 "RCU dynticks_nesting counter underflow/zero!");
600 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
601 			 DYNTICK_IRQ_NONIDLE,
602 			 "Bad RCU  dynticks_nmi_nesting counter\n");
603 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
604 			 "RCU in extended quiescent state!");
605 }
606 #endif /* #ifdef CONFIG_PROVE_RCU */
607 
608 #ifdef CONFIG_NO_HZ_FULL
609 /**
610  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
611  *
612  * The scheduler tick is not normally enabled when CPUs enter the kernel
613  * from nohz_full userspace execution.  After all, nohz_full userspace
614  * execution is an RCU quiescent state and the time executing in the kernel
615  * is quite short.  Except of course when it isn't.  And it is not hard to
616  * cause a large system to spend tens of seconds or even minutes looping
617  * in the kernel, which can cause a number of problems, including RCU CPU
618  * stall warnings.
619  *
620  * Therefore, if a nohz_full CPU fails to report a quiescent state
621  * in a timely manner, the RCU grace-period kthread sets that CPU's
622  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
623  * exception will invoke this function, which will turn on the scheduler
624  * tick, which will enable RCU to detect that CPU's quiescent states,
625  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
626  * The tick will be disabled once a quiescent state is reported for
627  * this CPU.
628  *
629  * Of course, in carefully tuned systems, there might never be an
630  * interrupt or exception.  In that case, the RCU grace-period kthread
631  * will eventually cause one to happen.  However, in less carefully
632  * controlled environments, this function allows RCU to get what it
633  * needs without creating otherwise useless interruptions.
634  */
635 void __rcu_irq_enter_check_tick(void)
636 {
637 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
638 
639 	// If we're here from NMI there's nothing to do.
640 	if (in_nmi())
641 		return;
642 
643 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
644 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
645 
646 	if (!tick_nohz_full_cpu(rdp->cpu) ||
647 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
648 	    READ_ONCE(rdp->rcu_forced_tick)) {
649 		// RCU doesn't need nohz_full help from this CPU, or it is
650 		// already getting that help.
651 		return;
652 	}
653 
654 	// We get here only when not in an extended quiescent state and
655 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
656 	// already watching and (2) The fact that we are in an interrupt
657 	// handler and that the rcu_node lock is an irq-disabled lock
658 	// prevents self-deadlock.  So we can safely recheck under the lock.
659 	// Note that the nohz_full state currently cannot change.
660 	raw_spin_lock_rcu_node(rdp->mynode);
661 	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
662 		// A nohz_full CPU is in the kernel and RCU needs a
663 		// quiescent state.  Turn on the tick!
664 		WRITE_ONCE(rdp->rcu_forced_tick, true);
665 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
666 	}
667 	raw_spin_unlock_rcu_node(rdp->mynode);
668 }
669 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
670 #endif /* CONFIG_NO_HZ_FULL */
671 
672 /*
673  * Check to see if any future non-offloaded RCU-related work will need
674  * to be done by the current CPU, even if none need be done immediately,
675  * returning 1 if so.  This function is part of the RCU implementation;
676  * it is -not- an exported member of the RCU API.  This is used by
677  * the idle-entry code to figure out whether it is safe to disable the
678  * scheduler-clock interrupt.
679  *
680  * Just check whether or not this CPU has non-offloaded RCU callbacks
681  * queued.
682  */
683 int rcu_needs_cpu(void)
684 {
685 	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
686 		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
687 }
688 
689 /*
690  * If any sort of urgency was applied to the current CPU (for example,
691  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
692  * to get to a quiescent state, disable it.
693  */
694 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
695 {
696 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
697 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
698 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
699 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
700 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
701 		WRITE_ONCE(rdp->rcu_forced_tick, false);
702 	}
703 }
704 
705 /**
706  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
707  *
708  * Return true if RCU is watching the running CPU, which means that this
709  * CPU can safely enter RCU read-side critical sections.  In other words,
710  * if the current CPU is not in its idle loop or is in an interrupt or
711  * NMI handler, return true.
712  *
713  * Make notrace because it can be called by the internal functions of
714  * ftrace, and making this notrace removes unnecessary recursion calls.
715  */
716 notrace bool rcu_is_watching(void)
717 {
718 	bool ret;
719 
720 	preempt_disable_notrace();
721 	ret = !rcu_dynticks_curr_cpu_in_eqs();
722 	preempt_enable_notrace();
723 	return ret;
724 }
725 EXPORT_SYMBOL_GPL(rcu_is_watching);
726 
727 /*
728  * If a holdout task is actually running, request an urgent quiescent
729  * state from its CPU.  This is unsynchronized, so migrations can cause
730  * the request to go to the wrong CPU.  Which is OK, all that will happen
731  * is that the CPU's next context switch will be a bit slower and next
732  * time around this task will generate another request.
733  */
734 void rcu_request_urgent_qs_task(struct task_struct *t)
735 {
736 	int cpu;
737 
738 	barrier();
739 	cpu = task_cpu(t);
740 	if (!task_curr(t))
741 		return; /* This task is not running on that CPU. */
742 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
743 }
744 
745 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
746 
747 /*
748  * Is the current CPU online as far as RCU is concerned?
749  *
750  * Disable preemption to avoid false positives that could otherwise
751  * happen due to the current CPU number being sampled, this task being
752  * preempted, its old CPU being taken offline, resuming on some other CPU,
753  * then determining that its old CPU is now offline.
754  *
755  * Disable checking if in an NMI handler because we cannot safely
756  * report errors from NMI handlers anyway.  In addition, it is OK to use
757  * RCU on an offline processor during initial boot, hence the check for
758  * rcu_scheduler_fully_active.
759  */
760 bool rcu_lockdep_current_cpu_online(void)
761 {
762 	struct rcu_data *rdp;
763 	bool ret = false;
764 
765 	if (in_nmi() || !rcu_scheduler_fully_active)
766 		return true;
767 	preempt_disable_notrace();
768 	rdp = this_cpu_ptr(&rcu_data);
769 	/*
770 	 * Strictly, we care here about the case where the current CPU is
771 	 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
772 	 * not being up to date. So arch_spin_is_locked() might have a
773 	 * false positive if it's held by some *other* CPU, but that's
774 	 * OK because that just means a false *negative* on the warning.
775 	 */
776 	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
777 		ret = true;
778 	preempt_enable_notrace();
779 	return ret;
780 }
781 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
782 
783 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
784 
785 /*
786  * When trying to report a quiescent state on behalf of some other CPU,
787  * it is our responsibility to check for and handle potential overflow
788  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
789  * After all, the CPU might be in deep idle state, and thus executing no
790  * code whatsoever.
791  */
792 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
793 {
794 	raw_lockdep_assert_held_rcu_node(rnp);
795 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
796 			 rnp->gp_seq))
797 		WRITE_ONCE(rdp->gpwrap, true);
798 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
799 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
800 }
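
/*
 * Editorial note (not in the original source): the ULONG_MAX / 4 offset
 * above means ->gpwrap is set only when this CPU's rdp->gp_seq has fallen
 * more than a quarter of the counter space behind the rcu_node
 * structure's ->gp_seq, that is, when the CPU has been idle through so
 * many grace periods that direct comparison of the two counters is no
 * longer meaningful.
 */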
801 
802 /*
803  * Snapshot the specified CPU's dynticks counter so that we can later
804  * credit them with an implicit quiescent state.  Return 1 if this CPU
805  * is in dynticks idle mode, which is an extended quiescent state.
806  */
807 static int dyntick_save_progress_counter(struct rcu_data *rdp)
808 {
809 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
810 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
811 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
812 		rcu_gpnum_ovf(rdp->mynode, rdp);
813 		return 1;
814 	}
815 	return 0;
816 }
817 
818 /*
819  * Return true if the specified CPU has passed through a quiescent
820  * state by virtue of being in or having passed through a dynticks
821  * idle state since the last call to dyntick_save_progress_counter()
822  * for this same CPU, or by virtue of having been offline.
823  */
824 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
825 {
826 	unsigned long jtsq;
827 	struct rcu_node *rnp = rdp->mynode;
828 
829 	/*
830 	 * If the CPU passed through or entered a dynticks idle phase with
831 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
832 	 * already acknowledged the request to pass through a quiescent
833 	 * state.  Either way, that CPU cannot possibly be in an RCU
834 	 * read-side critical section that started before the beginning
835 	 * of the current RCU grace period.
836 	 */
837 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
838 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
839 		rcu_gpnum_ovf(rnp, rdp);
840 		return 1;
841 	}
842 
843 	/*
844 	 * Complain if a CPU that is considered to be offline from RCU's
845 	 * perspective has not yet reported a quiescent state.  After all,
846 	 * the offline CPU should have reported a quiescent state during
847 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
848 	 * if it ran concurrently with either the CPU going offline or the
849 	 * last task on a leaf rcu_node structure exiting its RCU read-side
850 	 * critical section while all CPUs corresponding to that structure
851 	 * are offline.  This added warning detects bugs in any of these
852 	 * code paths.
853 	 *
854 	 * The rcu_node structure's ->lock is held here, which excludes
855 	 * the relevant portions of the CPU-hotplug code, the grace-period
856 	 * initialization code, and the rcu_read_unlock() code paths.
857 	 *
858 	 * For more detail, please refer to the "Hotplug CPU" section
859 	 * of RCU's Requirements documentation.
860 	 */
861 	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
862 		struct rcu_node *rnp1;
863 
864 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
865 			__func__, rnp->grplo, rnp->grphi, rnp->level,
866 			(long)rnp->gp_seq, (long)rnp->completedqs);
867 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
868 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
869 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
870 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
871 			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
872 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
873 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
874 		return 1; /* Break things loose after complaining. */
875 	}
876 
877 	/*
878 	 * A CPU running for an extended time within the kernel can
879 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
880 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
881 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
882 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
883 	 * variable are safe because the assignments are repeated if this
884 	 * CPU failed to pass through a quiescent state.  This code
885 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
886 	 * is set way high.
887 	 */
888 	jtsq = READ_ONCE(jiffies_to_sched_qs);
889 	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
890 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
891 	     time_after(jiffies, rcu_state.jiffies_resched) ||
892 	     rcu_state.cbovld)) {
893 		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
894 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
895 		smp_store_release(&rdp->rcu_urgent_qs, true);
896 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
897 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
898 	}
899 
900 	/*
901 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
902 	 * The above code handles this, but only for straight cond_resched().
903 	 * And some in-kernel loops check need_resched() before calling
904 	 * cond_resched(), which defeats the above code for CPUs that are
905 	 * running in-kernel with scheduling-clock interrupts disabled.
906 	 * So hit them over the head with the resched_cpu() hammer!
907 	 */
908 	if (tick_nohz_full_cpu(rdp->cpu) &&
909 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
910 	     rcu_state.cbovld)) {
911 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
912 		resched_cpu(rdp->cpu);
913 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
914 	}
915 
916 	/*
917 	 * If more than halfway to RCU CPU stall-warning time, invoke
918 	 * resched_cpu() more frequently to try to loosen things up a bit.
919 	 * Also check to see if the CPU is getting hammered with interrupts,
920 	 * but only once per grace period, just to keep the IPIs down to
921 	 * a dull roar.
922 	 */
923 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
924 		if (time_after(jiffies,
925 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
926 			resched_cpu(rdp->cpu);
927 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
928 		}
929 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
930 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
931 		    (rnp->ffmask & rdp->grpmask)) {
932 			rdp->rcu_iw_pending = true;
933 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
934 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
935 		}
936 	}
937 
938 	return 0;
939 }
940 
941 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
942 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
943 			      unsigned long gp_seq_req, const char *s)
944 {
945 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
946 				      gp_seq_req, rnp->level,
947 				      rnp->grplo, rnp->grphi, s);
948 }
949 
950 /*
951  * rcu_start_this_gp - Request the start of a particular grace period
952  * @rnp_start: The leaf node of the CPU from which to start.
953  * @rdp: The rcu_data corresponding to the CPU from which to start.
954  * @gp_seq_req: The gp_seq of the grace period to start.
955  *
956  * Start the specified grace period, as needed to handle newly arrived
957  * callbacks.  The required future grace periods are recorded in each
958  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
959  * is reason to awaken the grace-period kthread.
960  *
961  * The caller must hold the specified rcu_node structure's ->lock, which
962  * is why the caller is responsible for waking the grace-period kthread.
963  *
964  * Returns true if the GP thread needs to be awakened else false.
965  */
966 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
967 			      unsigned long gp_seq_req)
968 {
969 	bool ret = false;
970 	struct rcu_node *rnp;
971 
972 	/*
973 	 * Use funnel locking to either acquire the root rcu_node
974 	 * structure's lock or bail out if the need for this grace period
975 	 * has already been recorded -- or if that grace period has in
976 	 * fact already started.  If there is already a grace period in
977 	 * progress in a non-leaf node, no recording is needed because the
978 	 * end of the grace period will scan the leaf rcu_node structures.
979 	 * Note that rnp_start->lock must not be released.
980 	 */
981 	raw_lockdep_assert_held_rcu_node(rnp_start);
982 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
983 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
984 		if (rnp != rnp_start)
985 			raw_spin_lock_rcu_node(rnp);
986 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
987 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
988 		    (rnp != rnp_start &&
989 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
990 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
991 					  TPS("Prestarted"));
992 			goto unlock_out;
993 		}
994 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
995 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
996 			/*
997 			 * We just marked the leaf or internal node, and a
998 			 * grace period is in progress, which means that
999 			 * rcu_gp_cleanup() will see the marking.  Bail to
1000 			 * reduce contention.
1001 			 */
1002 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1003 					  TPS("Startedleaf"));
1004 			goto unlock_out;
1005 		}
1006 		if (rnp != rnp_start && rnp->parent != NULL)
1007 			raw_spin_unlock_rcu_node(rnp);
1008 		if (!rnp->parent)
1009 			break;  /* At root, and perhaps also leaf. */
1010 	}
1011 
1012 	/* If GP already in progress, just leave, otherwise start one. */
1013 	if (rcu_gp_in_progress()) {
1014 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1015 		goto unlock_out;
1016 	}
1017 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1018 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1019 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1020 	if (!READ_ONCE(rcu_state.gp_kthread)) {
1021 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1022 		goto unlock_out;
1023 	}
1024 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1025 	ret = true;  /* Caller must wake GP kthread. */
1026 unlock_out:
1027 	/* Push furthest requested GP to leaf node and rcu_data structure. */
1028 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1029 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1030 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1031 	}
1032 	if (rnp != rnp_start)
1033 		raw_spin_unlock_rcu_node(rnp);
1034 	return ret;
1035 }
1036 
1037 /*
1038  * Clean up any old requests for the just-ended grace period.  Also return
1039  * whether any additional grace periods have been requested.
1040  */
1041 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1042 {
1043 	bool needmore;
1044 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1045 
1046 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1047 	if (!needmore)
1048 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1049 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1050 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1051 	return needmore;
1052 }
1053 
1054 static void swake_up_one_online_ipi(void *arg)
1055 {
1056 	struct swait_queue_head *wqh = arg;
1057 
1058 	swake_up_one(wqh);
1059 }
1060 
1061 static void swake_up_one_online(struct swait_queue_head *wqh)
1062 {
1063 	int cpu = get_cpu();
1064 
1065 	/*
1066 	 * If called from rcutree_report_cpu_starting(), a wakeup is
1067 	 * dangerous that late in the CPU-down hotplug process: the
1068 	 * scheduler might queue an hrtimer that is then ignored. Defer the wakeup
1069 	 * to an online CPU instead.
1070 	 */
1071 	if (unlikely(cpu_is_offline(cpu))) {
1072 		int target;
1073 
1074 		target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
1075 					 cpu_online_mask);
1076 
1077 		smp_call_function_single(target, swake_up_one_online_ipi,
1078 					 wqh, 0);
1079 		put_cpu();
1080 	} else {
1081 		put_cpu();
1082 		swake_up_one(wqh);
1083 	}
1084 }
1085 
1086 /*
1087  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1088  * interrupt or softirq handler, in which case we just might immediately
1089  * sleep upon return, resulting in a grace-period hang), and don't bother
1090  * awakening when there is nothing for the grace-period kthread to do
1091  * (as in several CPUs raced to awaken, we lost), and finally don't try
1092  * to awaken a kthread that has not yet been created.  If all those checks
1093  * are passed, track some debug information and awaken.
1094  *
1095  * So why do the self-wakeup when in an interrupt or softirq handler
1096  * in the grace-period kthread's context?  Because the kthread might have
1097  * been interrupted just as it was going to sleep, and just after the final
1098  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1099  * is required, and is therefore supplied.
1100  */
1101 static void rcu_gp_kthread_wake(void)
1102 {
1103 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1104 
1105 	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1106 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1107 		return;
1108 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1109 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1110 	swake_up_one_online(&rcu_state.gp_wq);
1111 }
1112 
1113 /*
1114  * If there is room, assign a ->gp_seq number to any callbacks on this
1115  * CPU that have not already been assigned.  Also accelerate any callbacks
1116  * that were previously assigned a ->gp_seq number that has since proven
1117  * to be too conservative, which can happen if callbacks get assigned a
1118  * ->gp_seq number while RCU is idle, but with reference to a non-root
1119  * rcu_node structure.  This function is idempotent, so it does not hurt
1120  * to call it repeatedly.  Returns an flag saying that we should awaken
1121  * the RCU grace-period kthread.
1122  *
1123  * The caller must hold rnp->lock with interrupts disabled.
1124  */
1125 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1126 {
1127 	unsigned long gp_seq_req;
1128 	bool ret = false;
1129 
1130 	rcu_lockdep_assert_cblist_protected(rdp);
1131 	raw_lockdep_assert_held_rcu_node(rnp);
1132 
1133 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1134 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1135 		return false;
1136 
1137 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1138 
1139 	/*
1140 	 * Callbacks are often registered with incomplete grace-period
1141 	 * information.  Something about the fact that getting exact
1142 	 * information requires acquiring a global lock...  RCU therefore
1143 	 * makes a conservative estimate of the grace period number at which
1144 	 * a given callback will become ready to invoke.	The following
1145 	 * code checks this estimate and improves it when possible, thus
1146 	 * accelerating callback invocation to an earlier grace-period
1147 	 * number.
1148 	 */
1149 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1150 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1151 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1152 
1153 	/* Trace depending on how much we were able to accelerate. */
1154 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1155 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1156 	else
1157 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1158 
1159 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1160 
1161 	return ret;
1162 }
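
/*
 * Editorial example (not in the original source): suppose a callback was
 * queued while this CPU's leaf rcu_node structure still advertised an
 * older grace period, so the callback was conservatively tagged to wait
 * for a later ->gp_seq than necessary.  rcu_accelerate_cbs() re-tags such
 * callbacks with rcu_seq_snap(&rcu_state.gp_seq), the earliest sequence
 * number at which a full grace period is guaranteed to have elapsed, so
 * they are invoked as soon as that grace period completes.
 */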
1163 
1164 /*
1165  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1166  * rcu_node structure's ->lock be held.  It consults the cached value
1167  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1168  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1169  * while holding the leaf rcu_node structure's ->lock.
1170  */
1171 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1172 					struct rcu_data *rdp)
1173 {
1174 	unsigned long c;
1175 	bool needwake;
1176 
1177 	rcu_lockdep_assert_cblist_protected(rdp);
1178 	c = rcu_seq_snap(&rcu_state.gp_seq);
1179 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1180 		/* Old request still live, so mark recent callbacks. */
1181 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1182 		return;
1183 	}
1184 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1185 	needwake = rcu_accelerate_cbs(rnp, rdp);
1186 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1187 	if (needwake)
1188 		rcu_gp_kthread_wake();
1189 }
1190 
1191 /*
1192  * Move any callbacks whose grace period has completed to the
1193  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1194  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1195  * sublist.  This function is idempotent, so it does not hurt to
1196  * invoke it repeatedly.  As long as it is not invoked -too- often...
1197  * Returns true if the RCU grace-period kthread needs to be awakened.
1198  *
1199  * The caller must hold rnp->lock with interrupts disabled.
1200  */
1201 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1202 {
1203 	rcu_lockdep_assert_cblist_protected(rdp);
1204 	raw_lockdep_assert_held_rcu_node(rnp);
1205 
1206 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1207 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1208 		return false;
1209 
1210 	/*
1211 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1212 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1213 	 */
1214 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1215 
1216 	/* Classify any remaining callbacks. */
1217 	return rcu_accelerate_cbs(rnp, rdp);
1218 }
1219 
1220 /*
1221  * Move and classify callbacks, but only if doing so won't require
1222  * that the RCU grace-period kthread be awakened.
1223  */
1224 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1225 						  struct rcu_data *rdp)
1226 {
1227 	rcu_lockdep_assert_cblist_protected(rdp);
1228 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1229 		return;
1230 	// The grace period cannot end while we hold the rcu_node lock.
1231 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1232 		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1233 	raw_spin_unlock_rcu_node(rnp);
1234 }
1235 
1236 /*
1237  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1238  * quiescent state.  This is intended to be invoked when the CPU notices
1239  * a new grace period.
1240  */
1241 static void rcu_strict_gp_check_qs(void)
1242 {
1243 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1244 		rcu_read_lock();
1245 		rcu_read_unlock();
1246 	}
1247 }
1248 
1249 /*
1250  * Update CPU-local rcu_data state to record the beginnings and ends of
1251  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1252  * structure corresponding to the current CPU, and must have irqs disabled.
1253  * Returns true if the grace-period kthread needs to be awakened.
1254  */
1255 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1256 {
1257 	bool ret = false;
1258 	bool need_qs;
1259 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
1260 
1261 	raw_lockdep_assert_held_rcu_node(rnp);
1262 
1263 	if (rdp->gp_seq == rnp->gp_seq)
1264 		return false; /* Nothing to do. */
1265 
1266 	/* Handle the ends of any preceding grace periods first. */
1267 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1268 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1269 		if (!offloaded)
1270 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1271 		rdp->core_needs_qs = false;
1272 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1273 	} else {
1274 		if (!offloaded)
1275 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1276 		if (rdp->core_needs_qs)
1277 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1278 	}
1279 
1280 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1281 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1282 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1283 		/*
1284 		 * If the current grace period is waiting for this CPU,
1285 		 * set up to detect a quiescent state, otherwise don't
1286 		 * go looking for one.
1287 		 */
1288 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1289 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1290 		rdp->cpu_no_qs.b.norm = need_qs;
1291 		rdp->core_needs_qs = need_qs;
1292 		zero_cpu_stall_ticks(rdp);
1293 	}
1294 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1295 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1296 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1297 	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1298 		WRITE_ONCE(rdp->last_sched_clock, jiffies);
1299 	WRITE_ONCE(rdp->gpwrap, false);
1300 	rcu_gpnum_ovf(rnp, rdp);
1301 	return ret;
1302 }
1303 
1304 static void note_gp_changes(struct rcu_data *rdp)
1305 {
1306 	unsigned long flags;
1307 	bool needwake;
1308 	struct rcu_node *rnp;
1309 
1310 	local_irq_save(flags);
1311 	rnp = rdp->mynode;
1312 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1313 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1314 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1315 		local_irq_restore(flags);
1316 		return;
1317 	}
1318 	needwake = __note_gp_changes(rnp, rdp);
1319 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1320 	rcu_strict_gp_check_qs();
1321 	if (needwake)
1322 		rcu_gp_kthread_wake();
1323 }
1324 
1325 static atomic_t *rcu_gp_slow_suppress;
1326 
1327 /* Register a counter to suppress debugging grace-period delays. */
1328 void rcu_gp_slow_register(atomic_t *rgssp)
1329 {
1330 	WARN_ON_ONCE(rcu_gp_slow_suppress);
1331 
1332 	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1333 }
1334 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1335 
1336 /* Unregister a counter, with NULL for not caring which. */
1337 void rcu_gp_slow_unregister(atomic_t *rgssp)
1338 {
1339 	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
1340 
1341 	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1342 }
1343 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1344 
1345 static bool rcu_gp_slow_is_suppressed(void)
1346 {
1347 	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1348 
1349 	return rgssp && atomic_read(rgssp);
1350 }
1351 
1352 static void rcu_gp_slow(int delay)
1353 {
1354 	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1355 	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1356 		schedule_timeout_idle(delay);
1357 }
1358 
1359 static unsigned long sleep_duration;
1360 
1361 /* Allow rcutorture to stall the grace-period kthread. */
1362 void rcu_gp_set_torture_wait(int duration)
1363 {
1364 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1365 		WRITE_ONCE(sleep_duration, duration);
1366 }
1367 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1368 
1369 /* Actually implement the aforementioned wait. */
1370 static void rcu_gp_torture_wait(void)
1371 {
1372 	unsigned long duration;
1373 
1374 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1375 		return;
1376 	duration = xchg(&sleep_duration, 0UL);
1377 	if (duration > 0) {
1378 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1379 		schedule_timeout_idle(duration);
1380 		pr_alert("%s: Wait complete\n", __func__);
1381 	}
1382 }
1383 
1384 /*
1385  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1386  * processing.
1387  */
1388 static void rcu_strict_gp_boundary(void *unused)
1389 {
1390 	invoke_rcu_core();
1391 }
1392 
1393 // Has rcu_init() been invoked?  This is used (for example) to determine
1394 // whether spinlocks may be acquired safely.
1395 static bool rcu_init_invoked(void)
1396 {
1397 	return !!rcu_state.n_online_cpus;
1398 }
1399 
1400 // Make the polled API aware of the beginning of a grace period.
1401 static void rcu_poll_gp_seq_start(unsigned long *snap)
1402 {
1403 	struct rcu_node *rnp = rcu_get_root();
1404 
1405 	if (rcu_init_invoked())
1406 		raw_lockdep_assert_held_rcu_node(rnp);
1407 
1408 	// If RCU was idle, note beginning of GP.
1409 	if (!rcu_seq_state(rcu_state.gp_seq_polled))
1410 		rcu_seq_start(&rcu_state.gp_seq_polled);
1411 
1412 	// Either way, record current state.
1413 	*snap = rcu_state.gp_seq_polled;
1414 }
1415 
1416 // Make the polled API aware of the end of a grace period.
1417 static void rcu_poll_gp_seq_end(unsigned long *snap)
1418 {
1419 	struct rcu_node *rnp = rcu_get_root();
1420 
1421 	if (rcu_init_invoked())
1422 		raw_lockdep_assert_held_rcu_node(rnp);
1423 
1424 	// If the previously noted GP is still in effect, record the
1425 	// end of that GP.  Either way, zero counter to avoid counter-wrap
1426 	// problems.
1427 	if (*snap && *snap == rcu_state.gp_seq_polled) {
1428 		rcu_seq_end(&rcu_state.gp_seq_polled);
1429 		rcu_state.gp_seq_polled_snap = 0;
1430 		rcu_state.gp_seq_polled_exp_snap = 0;
1431 	} else {
1432 		*snap = 0;
1433 	}
1434 }
1435 
1436 // Make the polled API aware of the beginning of a grace period, but
1437 // where caller does not hold the root rcu_node structure's lock.
1438 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1439 {
1440 	unsigned long flags;
1441 	struct rcu_node *rnp = rcu_get_root();
1442 
1443 	if (rcu_init_invoked()) {
1444 		lockdep_assert_irqs_enabled();
1445 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1446 	}
1447 	rcu_poll_gp_seq_start(snap);
1448 	if (rcu_init_invoked())
1449 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1450 }
1451 
1452 // Make the polled API aware of the end of a grace period, but where
1453 // caller does not hold the root rcu_node structure's lock.
1454 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1455 {
1456 	unsigned long flags;
1457 	struct rcu_node *rnp = rcu_get_root();
1458 
1459 	if (rcu_init_invoked()) {
1460 		lockdep_assert_irqs_enabled();
1461 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1462 	}
1463 	rcu_poll_gp_seq_end(snap);
1464 	if (rcu_init_invoked())
1465 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1466 }
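/*
 * Consumer-side sketch of the polled grace-period API that these helpers
 * feed (illustrative only; the public entry points live elsewhere in RCU):
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	// ... do other work ...
 *	if (!poll_state_synchronize_rcu(cookie))
 *		cond_synchronize_rcu(cookie);	// Block only if the grace
 *						// period has not completed.
 */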
1467 
1468 /*
1469  * Initialize a new grace period.  Return false if no grace period required.
1470  */
1471 static noinline_for_stack bool rcu_gp_init(void)
1472 {
1473 	unsigned long flags;
1474 	unsigned long oldmask;
1475 	unsigned long mask;
1476 	struct rcu_data *rdp;
1477 	struct rcu_node *rnp = rcu_get_root();
1478 
1479 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1480 	raw_spin_lock_irq_rcu_node(rnp);
1481 	if (!READ_ONCE(rcu_state.gp_flags)) {
1482 		/* Spurious wakeup, tell caller to go back to sleep.  */
1483 		raw_spin_unlock_irq_rcu_node(rnp);
1484 		return false;
1485 	}
1486 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1487 
1488 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1489 		/*
1490 		 * Grace period already in progress, don't start another.
1491 		 * Not supposed to be able to happen.
1492 		 */
1493 		raw_spin_unlock_irq_rcu_node(rnp);
1494 		return false;
1495 	}
1496 
1497 	/* Advance to a new grace period and initialize state. */
1498 	record_gp_stall_check_time();
1499 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1500 	rcu_seq_start(&rcu_state.gp_seq);
1501 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1502 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1503 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1504 	raw_spin_unlock_irq_rcu_node(rnp);
1505 
1506 	/*
1507 	 * Apply per-leaf buffered online and offline operations to
1508 	 * the rcu_node tree. Note that this new grace period need not
1509 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1510 	 * offlining path, when combined with checks in this function,
1511 	 * will handle CPUs that are currently going offline or that will
1512 	 * go offline later.  Please also refer to "Hotplug CPU" section
1513 	 * of RCU's Requirements documentation.
1514 	 */
1515 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1516 	/* Exclude CPU hotplug operations. */
1517 	rcu_for_each_leaf_node(rnp) {
1518 		local_irq_save(flags);
1519 		arch_spin_lock(&rcu_state.ofl_lock);
1520 		raw_spin_lock_rcu_node(rnp);
1521 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1522 		    !rnp->wait_blkd_tasks) {
1523 			/* Nothing to do on this leaf rcu_node structure. */
1524 			raw_spin_unlock_rcu_node(rnp);
1525 			arch_spin_unlock(&rcu_state.ofl_lock);
1526 			local_irq_restore(flags);
1527 			continue;
1528 		}
1529 
1530 		/* Record old state, apply changes to ->qsmaskinit field. */
1531 		oldmask = rnp->qsmaskinit;
1532 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1533 
1534 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1535 		if (!oldmask != !rnp->qsmaskinit) {
1536 			if (!oldmask) { /* First online CPU for rcu_node. */
1537 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1538 					rcu_init_new_rnp(rnp);
1539 			} else if (rcu_preempt_has_tasks(rnp)) {
1540 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1541 			} else { /* Last offline CPU and can propagate. */
1542 				rcu_cleanup_dead_rnp(rnp);
1543 			}
1544 		}
1545 
1546 		/*
1547 		 * If all waited-on tasks from prior grace period are
1548 		 * done, and if all this rcu_node structure's CPUs are
1549 		 * still offline, propagate up the rcu_node tree and
1550 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1551 		 * rcu_node structure's CPUs has since come back online,
1552 		 * simply clear ->wait_blkd_tasks.
1553 		 */
1554 		if (rnp->wait_blkd_tasks &&
1555 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1556 			rnp->wait_blkd_tasks = false;
1557 			if (!rnp->qsmaskinit)
1558 				rcu_cleanup_dead_rnp(rnp);
1559 		}
1560 
1561 		raw_spin_unlock_rcu_node(rnp);
1562 		arch_spin_unlock(&rcu_state.ofl_lock);
1563 		local_irq_restore(flags);
1564 	}
1565 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1566 
1567 	/*
1568 	 * Set the quiescent-state-needed bits in all the rcu_node
1569 	 * structures for all currently online CPUs in breadth-first
1570 	 * order, starting from the root rcu_node structure, relying on the
1571 	 * layout of the tree within the rcu_state.node[] array.  Note that
1572 	 * other CPUs will access only the leaves of the hierarchy, thus
1573 	 * seeing that no grace period is in progress, at least until the
1574 	 * corresponding leaf node has been initialized.
1575 	 *
1576 	 * The grace period cannot complete until the initialization
1577 	 * process finishes, because this kthread handles both.
1578 	 */
1579 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1580 	rcu_for_each_node_breadth_first(rnp) {
1581 		rcu_gp_slow(gp_init_delay);
1582 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1583 		rdp = this_cpu_ptr(&rcu_data);
1584 		rcu_preempt_check_blocked_tasks(rnp);
1585 		rnp->qsmask = rnp->qsmaskinit;
1586 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1587 		if (rnp == rdp->mynode)
1588 			(void)__note_gp_changes(rnp, rdp);
1589 		rcu_preempt_boost_start_gp(rnp);
1590 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1591 					    rnp->level, rnp->grplo,
1592 					    rnp->grphi, rnp->qsmask);
1593 		/* Quiescent states for tasks on any now-offline CPUs. */
1594 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1595 		rnp->rcu_gp_init_mask = mask;
1596 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1597 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1598 		else
1599 			raw_spin_unlock_irq_rcu_node(rnp);
1600 		cond_resched_tasks_rcu_qs();
1601 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1602 	}
1603 
1604 	// If strict, make all CPUs aware of new grace period.
1605 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1606 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1607 
1608 	return true;
1609 }
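/*
 * Worked example of the ->gp_seq encoding manipulated above (illustrative,
 * assuming the usual RCU_SEQ_CTR_SHIFT of 2): the low two bits hold the
 * grace-period state and the upper bits count grace periods.  If ->gp_seq
 * is 0x28 (counter 10, idle), rcu_seq_start() advances it to 0x29
 * (counter 10, state 1: GP in progress), and the later rcu_seq_end()
 * in rcu_gp_cleanup() advances it to 0x2c (counter 11, idle again).
 */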
1610 
1611 /*
1612  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1613  * time.
1614  */
1615 static bool rcu_gp_fqs_check_wake(int *gfp)
1616 {
1617 	struct rcu_node *rnp = rcu_get_root();
1618 
1619 	// If under overload conditions, force an immediate FQS scan.
1620 	if (*gfp & RCU_GP_FLAG_OVLD)
1621 		return true;
1622 
1623 	// Someone like call_rcu() requested a force-quiescent-state scan.
1624 	*gfp = READ_ONCE(rcu_state.gp_flags);
1625 	if (*gfp & RCU_GP_FLAG_FQS)
1626 		return true;
1627 
1628 	// The current grace period has completed.
1629 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1630 		return true;
1631 
1632 	return false;
1633 }
1634 
1635 /*
1636  * Do one round of quiescent-state forcing.
1637  */
1638 static void rcu_gp_fqs(bool first_time)
1639 {
1640 	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1641 	struct rcu_node *rnp = rcu_get_root();
1642 
1643 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1644 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1645 
1646 	WARN_ON_ONCE(nr_fqs > 3);
1647 	/* Only count down nr_fqs for stall purposes if jiffies moves. */
1648 	if (nr_fqs) {
1649 		if (nr_fqs == 1) {
1650 			WRITE_ONCE(rcu_state.jiffies_stall,
1651 				   jiffies + rcu_jiffies_till_stall_check());
1652 		}
1653 		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1654 	}
1655 
1656 	if (first_time) {
1657 		/* Collect dyntick-idle snapshots. */
1658 		force_qs_rnp(dyntick_save_progress_counter);
1659 	} else {
1660 		/* Handle dyntick-idle and offline CPUs. */
1661 		force_qs_rnp(rcu_implicit_dynticks_qs);
1662 	}
1663 	/* Clear flag to prevent immediate re-entry. */
1664 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1665 		raw_spin_lock_irq_rcu_node(rnp);
1666 		WRITE_ONCE(rcu_state.gp_flags,
1667 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1668 		raw_spin_unlock_irq_rcu_node(rnp);
1669 	}
1670 }
1671 
1672 /*
1673  * Loop doing repeated quiescent-state forcing until the grace period ends.
1674  */
1675 static noinline_for_stack void rcu_gp_fqs_loop(void)
1676 {
1677 	bool first_gp_fqs = true;
1678 	int gf = 0;
1679 	unsigned long j;
1680 	int ret;
1681 	struct rcu_node *rnp = rcu_get_root();
1682 
1683 	j = READ_ONCE(jiffies_till_first_fqs);
1684 	if (rcu_state.cbovld)
1685 		gf = RCU_GP_FLAG_OVLD;
1686 	ret = 0;
1687 	for (;;) {
1688 		if (rcu_state.cbovld) {
1689 			j = (j + 2) / 3;
1690 			if (j <= 0)
1691 				j = 1;
1692 		}
1693 		if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1694 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1695 			/*
1696 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1697 			 * update; required for stall checks.
1698 			 */
1699 			smp_wmb();
1700 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1701 				   jiffies + (j ? 3 * j : 2));
1702 		}
1703 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1704 				       TPS("fqswait"));
1705 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1706 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1707 				 rcu_gp_fqs_check_wake(&gf), j);
1708 		rcu_gp_torture_wait();
1709 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1710 		/* Locking provides needed memory barriers. */
1711 		/*
1712 		 * If the root rcu_node structure indicates that the grace period
1713 		 * has ended, exit the loop.  The rcu_preempt_blocked_readers_cgp(rnp) check
1714 		 * is required only for single-node rcu_node trees because readers blocking
1715 		 * the current grace period are queued only on leaf rcu_node structures.
1716 		 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1717 		 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1718 		 * the corresponding leaf nodes have passed through their quiescent state.
1719 		 */
1720 		if (!READ_ONCE(rnp->qsmask) &&
1721 		    !rcu_preempt_blocked_readers_cgp(rnp))
1722 			break;
1723 		/* If time for quiescent-state forcing, do it. */
1724 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1725 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1726 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1727 					       TPS("fqsstart"));
1728 			rcu_gp_fqs(first_gp_fqs);
1729 			gf = 0;
1730 			if (first_gp_fqs) {
1731 				first_gp_fqs = false;
1732 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1733 			}
1734 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1735 					       TPS("fqsend"));
1736 			cond_resched_tasks_rcu_qs();
1737 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1738 			ret = 0; /* Force full wait till next FQS. */
1739 			j = READ_ONCE(jiffies_till_next_fqs);
1740 		} else {
1741 			/* Deal with stray signal. */
1742 			cond_resched_tasks_rcu_qs();
1743 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1744 			WARN_ON(signal_pending(current));
1745 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1746 					       TPS("fqswaitsig"));
1747 			ret = 1; /* Keep old FQS timing. */
1748 			j = jiffies;
1749 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1750 				j = 1;
1751 			else
1752 				j = rcu_state.jiffies_force_qs - j;
1753 			gf = 0;
1754 		}
1755 	}
1756 }
1757 
1758 /*
1759  * Clean up after the old grace period.
1760  */
1761 static noinline void rcu_gp_cleanup(void)
1762 {
1763 	int cpu;
1764 	bool needgp = false;
1765 	unsigned long gp_duration;
1766 	unsigned long new_gp_seq;
1767 	bool offloaded;
1768 	struct rcu_data *rdp;
1769 	struct rcu_node *rnp = rcu_get_root();
1770 	struct swait_queue_head *sq;
1771 
1772 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1773 	raw_spin_lock_irq_rcu_node(rnp);
1774 	rcu_state.gp_end = jiffies;
1775 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1776 	if (gp_duration > rcu_state.gp_max)
1777 		rcu_state.gp_max = gp_duration;
1778 
1779 	/*
1780 	 * We know the grace period is complete, but to everyone else
1781 	 * it appears to still be ongoing.  But it is also the case
1782 	 * that to everyone else it looks like there is nothing that
1783 	 * they can do to advance the grace period.  It is therefore
1784 	 * safe for us to drop the lock in order to mark the grace
1785 	 * period as completed in all of the rcu_node structures.
1786 	 */
1787 	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1788 	raw_spin_unlock_irq_rcu_node(rnp);
1789 
1790 	/*
1791 	 * Propagate new ->gp_seq value to rcu_node structures so that
1792 	 * other CPUs don't have to wait until the start of the next grace
1793 	 * period to process their callbacks.  This also avoids some nasty
1794 	 * RCU grace-period initialization races by forcing the end of
1795 	 * the current grace period to be completely recorded in all of
1796 	 * the rcu_node structures before the beginning of the next grace
1797 	 * period is recorded in any of the rcu_node structures.
1798 	 */
1799 	new_gp_seq = rcu_state.gp_seq;
1800 	rcu_seq_end(&new_gp_seq);
1801 	rcu_for_each_node_breadth_first(rnp) {
1802 		raw_spin_lock_irq_rcu_node(rnp);
1803 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1804 			dump_blkd_tasks(rnp, 10);
1805 		WARN_ON_ONCE(rnp->qsmask);
1806 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1807 		if (!rnp->parent)
1808 			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1809 		rdp = this_cpu_ptr(&rcu_data);
1810 		if (rnp == rdp->mynode)
1811 			needgp = __note_gp_changes(rnp, rdp) || needgp;
1812 		/* smp_mb() provided by prior unlock-lock pair. */
1813 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
1814 		// Reset overload indication for CPUs no longer overloaded
1815 		if (rcu_is_leaf_node(rnp))
1816 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1817 				rdp = per_cpu_ptr(&rcu_data, cpu);
1818 				check_cb_ovld_locked(rdp, rnp);
1819 			}
1820 		sq = rcu_nocb_gp_get(rnp);
1821 		raw_spin_unlock_irq_rcu_node(rnp);
1822 		rcu_nocb_gp_cleanup(sq);
1823 		cond_resched_tasks_rcu_qs();
1824 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1825 		rcu_gp_slow(gp_cleanup_delay);
1826 	}
1827 	rnp = rcu_get_root();
1828 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1829 
1830 	/* Declare grace period done, trace first to use old GP number. */
1831 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1832 	rcu_seq_end(&rcu_state.gp_seq);
1833 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1834 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1835 	/* Check for GP requests since above loop. */
1836 	rdp = this_cpu_ptr(&rcu_data);
1837 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1838 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1839 				  TPS("CleanupMore"));
1840 		needgp = true;
1841 	}
1842 	/* Advance CBs to reduce false positives below. */
1843 	offloaded = rcu_rdp_is_offloaded(rdp);
1844 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1845 
1846 		// We get here if a grace period was needed ("needgp")
1847 		// and the above call to rcu_accelerate_cbs() did not set
1848 		// the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
1849 		// the need for another grace period).  The purpose
1850 		// of the "offloaded" check is to avoid invoking
1851 		// rcu_accelerate_cbs() on an offloaded CPU because we do not
1852 		// hold the ->nocb_lock needed to safely access an offloaded
1853 		// ->cblist.  We do not want to acquire that lock because
1854 		// it can be heavily contended during callback floods.
1855 
1856 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1857 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1858 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1859 	} else {
1860 
1861 		// We get here either if there is no need for an
1862 		// additional grace period or if rcu_accelerate_cbs() has
1863 		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
1864 		// So all we need to do is to clear all of the other
1865 		// ->gp_flags bits.
1866 
1867 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1868 	}
1869 	raw_spin_unlock_irq_rcu_node(rnp);
1870 
1871 	// If strict, make all CPUs aware of the end of the old grace period.
1872 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1873 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1874 }
1875 
1876 /*
1877  * Body of kthread that handles grace periods.
1878  */
1879 static int __noreturn rcu_gp_kthread(void *unused)
1880 {
1881 	rcu_bind_gp_kthread();
1882 	for (;;) {
1883 
1884 		/* Handle grace-period start. */
1885 		for (;;) {
1886 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1887 					       TPS("reqwait"));
1888 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1889 			swait_event_idle_exclusive(rcu_state.gp_wq,
1890 					 READ_ONCE(rcu_state.gp_flags) &
1891 					 RCU_GP_FLAG_INIT);
1892 			rcu_gp_torture_wait();
1893 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1894 			/* Locking provides needed memory barrier. */
1895 			if (rcu_gp_init())
1896 				break;
1897 			cond_resched_tasks_rcu_qs();
1898 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1899 			WARN_ON(signal_pending(current));
1900 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1901 					       TPS("reqwaitsig"));
1902 		}
1903 
1904 		/* Handle quiescent-state forcing. */
1905 		rcu_gp_fqs_loop();
1906 
1907 		/* Handle grace-period end. */
1908 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1909 		rcu_gp_cleanup();
1910 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1911 	}
1912 }
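/*
 * Descriptive summary (not from the original file) of the ->gp_state
 * progression driven by the kthread above for one grace period:
 *
 *	RCU_GP_WAIT_GPS -> RCU_GP_DONE_GPS	wait for RCU_GP_FLAG_INIT
 *	RCU_GP_ONOFF -> RCU_GP_INIT		set within rcu_gp_init()
 *	RCU_GP_WAIT_FQS <-> RCU_GP_DOING_FQS	rcu_gp_fqs_loop(), repeated
 *	RCU_GP_CLEANUP -> RCU_GP_IDLE -> RCU_GP_CLEANED
 *						rcu_gp_cleanup() and return
 */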
1913 
1914 /*
1915  * Report a full set of quiescent states to the rcu_state data structure.
1916  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1917  * another grace period is required.  Whether we wake the grace-period
1918  * kthread or it awakens itself for the next round of quiescent-state
1919  * forcing, that kthread will clean up after the just-completed grace
1920  * period.  Note that the caller must hold rnp->lock, which is released
1921  * before return.
1922  */
1923 static void rcu_report_qs_rsp(unsigned long flags)
1924 	__releases(rcu_get_root()->lock)
1925 {
1926 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
1927 	WARN_ON_ONCE(!rcu_gp_in_progress());
1928 	WRITE_ONCE(rcu_state.gp_flags,
1929 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1930 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1931 	rcu_gp_kthread_wake();
1932 }
1933 
1934 /*
1935  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1936  * Allows quiescent states for a group of CPUs to be reported at one go
1937  * to the specified rcu_node structure, though all the CPUs in the group
1938  * must be represented by the same rcu_node structure (which need not be a
1939  * leaf rcu_node structure, though it often will be).  The gps parameter
1940  * is the grace-period snapshot, which means that the quiescent states
1941  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1942  * must be held upon entry, and it is released before return.
1943  *
1944  * As a special case, if mask is zero, the bit-already-cleared check is
1945  * disabled.  This allows propagating quiescent state due to resumed tasks
1946  * during grace-period initialization.
1947  */
1948 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1949 			      unsigned long gps, unsigned long flags)
1950 	__releases(rnp->lock)
1951 {
1952 	unsigned long oldmask = 0;
1953 	struct rcu_node *rnp_c;
1954 
1955 	raw_lockdep_assert_held_rcu_node(rnp);
1956 
1957 	/* Walk up the rcu_node hierarchy. */
1958 	for (;;) {
1959 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1960 
1961 			/*
1962 			 * Our bit has already been cleared, or the
1963 			 * relevant grace period is already over, so done.
1964 			 */
1965 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1966 			return;
1967 		}
1968 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1969 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1970 			     rcu_preempt_blocked_readers_cgp(rnp));
1971 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1972 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1973 						 mask, rnp->qsmask, rnp->level,
1974 						 rnp->grplo, rnp->grphi,
1975 						 !!rnp->gp_tasks);
1976 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1977 
1978 			/* Other bits still set at this level, so done. */
1979 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1980 			return;
1981 		}
1982 		rnp->completedqs = rnp->gp_seq;
1983 		mask = rnp->grpmask;
1984 		if (rnp->parent == NULL) {
1985 
1986 			/* No more levels.  Exit loop holding root lock. */
1987 
1988 			break;
1989 		}
1990 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1991 		rnp_c = rnp;
1992 		rnp = rnp->parent;
1993 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1994 		oldmask = READ_ONCE(rnp_c->qsmask);
1995 	}
1996 
1997 	/*
1998 	 * Get here if we are the last CPU to pass through a quiescent
1999 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2000 	 * to clean up and start the next grace period if one is needed.
2001 	 */
2002 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2003 }
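/*
 * Illustrative walk-through of the loop above (hypothetical two-level tree
 * with a leaf fanout of 16): when CPU 5 reports the last outstanding
 * quiescent state on its leaf, that leaf's ->qsmask drops to zero, so the
 * loop carries the leaf's ->grpmask up to the root and clears the leaf's
 * bit there.  Once the root's ->qsmask is also zero (and no readers are
 * blocking the grace period), rcu_report_qs_rsp() ends the grace period.
 */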
2004 
2005 /*
2006  * Record a quiescent state for all tasks that were previously queued
2007  * on the specified rcu_node structure and that were blocking the current
2008  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2009  * irqs disabled, and this lock is released upon return, but irqs remain
2010  * disabled.
2011  */
2012 static void __maybe_unused
2013 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2014 	__releases(rnp->lock)
2015 {
2016 	unsigned long gps;
2017 	unsigned long mask;
2018 	struct rcu_node *rnp_p;
2019 
2020 	raw_lockdep_assert_held_rcu_node(rnp);
2021 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2022 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2023 	    rnp->qsmask != 0) {
2024 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2025 		return;  /* Still need more quiescent states! */
2026 	}
2027 
2028 	rnp->completedqs = rnp->gp_seq;
2029 	rnp_p = rnp->parent;
2030 	if (rnp_p == NULL) {
2031 		/*
2032 		 * Only one rcu_node structure in the tree, so don't
2033 		 * try to report up to its nonexistent parent!
2034 		 */
2035 		rcu_report_qs_rsp(flags);
2036 		return;
2037 	}
2038 
2039 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2040 	gps = rnp->gp_seq;
2041 	mask = rnp->grpmask;
2042 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2043 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2044 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2045 }
2046 
2047 /*
2048  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2049  * structure.  This must be called from the specified CPU.
2050  */
2051 static void
2052 rcu_report_qs_rdp(struct rcu_data *rdp)
2053 {
2054 	unsigned long flags;
2055 	unsigned long mask;
2056 	bool needwake = false;
2057 	bool needacc = false;
2058 	struct rcu_node *rnp;
2059 
2060 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2061 	rnp = rdp->mynode;
2062 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2063 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2064 	    rdp->gpwrap) {
2065 
2066 		/*
2067 		 * The grace period in which this quiescent state was
2068 		 * recorded has ended, so don't report it upwards.
2069 		 * We will instead need a new quiescent state that lies
2070 		 * within the current grace period.
2071 		 */
2072 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2073 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2074 		return;
2075 	}
2076 	mask = rdp->grpmask;
2077 	rdp->core_needs_qs = false;
2078 	if ((rnp->qsmask & mask) == 0) {
2079 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2080 	} else {
2081 		/*
2082 		 * This GP can't end until cpu checks in, so all of our
2083 		 * callbacks can be processed during the next GP.
2084 		 *
2085 		 * NOCB kthreads have their own way to deal with that...
2086 		 */
2087 		if (!rcu_rdp_is_offloaded(rdp)) {
2088 			needwake = rcu_accelerate_cbs(rnp, rdp);
2089 		} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2090 			/*
2091 			 * ...but NOCB kthreads may miss or delay callbacks acceleration
2092 			 * if in the middle of a (de-)offloading process.
2093 			 */
2094 			needacc = true;
2095 		}
2096 
2097 		rcu_disable_urgency_upon_qs(rdp);
2098 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2099 		/* ^^^ Released rnp->lock */
2100 		if (needwake)
2101 			rcu_gp_kthread_wake();
2102 
2103 		if (needacc) {
2104 			rcu_nocb_lock_irqsave(rdp, flags);
2105 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2106 			rcu_nocb_unlock_irqrestore(rdp, flags);
2107 		}
2108 	}
2109 }
2110 
2111 /*
2112  * Check to see if there is a new grace period of which this CPU
2113  * is not yet aware, and if so, set up local rcu_data state for it.
2114  * Otherwise, see if this CPU has just passed through its first
2115  * quiescent state for this grace period, and record that fact if so.
2116  */
2117 static void
2118 rcu_check_quiescent_state(struct rcu_data *rdp)
2119 {
2120 	/* Check for grace-period ends and beginnings. */
2121 	note_gp_changes(rdp);
2122 
2123 	/*
2124 	 * Does this CPU still need to do its part for current grace period?
2125 	 * If no, return and let the other CPUs do their part as well.
2126 	 */
2127 	if (!rdp->core_needs_qs)
2128 		return;
2129 
2130 	/*
2131 	 * Was there a quiescent state since the beginning of the grace
2132 	 * period? If no, then exit and wait for the next call.
2133 	 */
2134 	if (rdp->cpu_no_qs.b.norm)
2135 		return;
2136 
2137 	/*
2138 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2139 	 * judge of that).
2140 	 */
2141 	rcu_report_qs_rdp(rdp);
2142 }
2143 
2144 /*
2145  * Near the end of the offline process.  Trace the fact that this CPU
2146  * is going offline.
2147  */
2148 int rcutree_dying_cpu(unsigned int cpu)
2149 {
2150 	bool blkd;
2151 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2152 	struct rcu_node *rnp = rdp->mynode;
2153 
2154 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2155 		return 0;
2156 
2157 	blkd = !!(rnp->qsmask & rdp->grpmask);
2158 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2159 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2160 	return 0;
2161 }
2162 
2163 /*
2164  * All CPUs for the specified rcu_node structure have gone offline,
2165  * and all tasks that were preempted within an RCU read-side critical
2166  * section while running on one of those CPUs have since exited their RCU
2167  * read-side critical section.  Some other CPU is reporting this fact with
2168  * the specified rcu_node structure's ->lock held and interrupts disabled.
2169  * This function therefore goes up the tree of rcu_node structures,
2170  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2171  * the leaf rcu_node structure's ->qsmaskinit field has already been
2172  * updated.
2173  *
2174  * This function does check that the specified rcu_node structure has
2175  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2176  * prematurely.  That said, invoking it after the fact will cost you
2177  * a needless lock acquisition.  So once it has done its work, don't
2178  * invoke it again.
2179  */
2180 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2181 {
2182 	long mask;
2183 	struct rcu_node *rnp = rnp_leaf;
2184 
2185 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2186 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2187 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2188 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2189 		return;
2190 	for (;;) {
2191 		mask = rnp->grpmask;
2192 		rnp = rnp->parent;
2193 		if (!rnp)
2194 			break;
2195 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2196 		rnp->qsmaskinit &= ~mask;
2197 		/* Between grace periods, so better already be zero! */
2198 		WARN_ON_ONCE(rnp->qsmask);
2199 		if (rnp->qsmaskinit) {
2200 			raw_spin_unlock_rcu_node(rnp);
2201 			/* irqs remain disabled. */
2202 			return;
2203 		}
2204 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2205 	}
2206 }
2207 
2208 /*
2209  * The CPU has been completely removed, and some other CPU is reporting
2210  * this fact from process context.  Do the remainder of the cleanup.
2211  * There can only be one CPU hotplug operation at a time, so no need for
2212  * explicit locking.
2213  */
2214 int rcutree_dead_cpu(unsigned int cpu)
2215 {
2216 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2217 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2218 
2219 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2220 		return 0;
2221 
2222 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2223 	/* Adjust any no-longer-needed kthreads. */
2224 	rcu_boost_kthread_setaffinity(rnp, -1);
2225 	// Stop-machine done, so allow nohz_full to disable tick.
2226 	tick_dep_clear(TICK_DEP_BIT_RCU);
2227 	return 0;
2228 }
2229 
2230 /*
2231  * Invoke any RCU callbacks that have made it to the end of their grace
2232  * period.  Throttle as specified by rdp->blimit.
2233  */
2234 static void rcu_do_batch(struct rcu_data *rdp)
2235 {
2236 	int div;
2237 	bool __maybe_unused empty;
2238 	unsigned long flags;
2239 	struct rcu_head *rhp;
2240 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2241 	long bl, count = 0;
2242 	long pending, tlimit = 0;
2243 
2244 	/* If no callbacks are ready, just return. */
2245 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2246 		trace_rcu_batch_start(rcu_state.name,
2247 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2248 		trace_rcu_batch_end(rcu_state.name, 0,
2249 				    !rcu_segcblist_empty(&rdp->cblist),
2250 				    need_resched(), is_idle_task(current),
2251 				    rcu_is_callbacks_kthread(rdp));
2252 		return;
2253 	}
2254 
2255 	/*
2256 	 * Extract the list of ready callbacks, disabling IRQs to prevent
2257 	 * races with call_rcu() from interrupt handlers.  Leave the
2258 	 * callback counts, as rcu_barrier() needs to be conservative.
2259 	 */
2260 	rcu_nocb_lock_irqsave(rdp, flags);
2261 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2262 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2263 	div = READ_ONCE(rcu_divisor);
2264 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2265 	bl = max(rdp->blimit, pending >> div);
2266 	if (in_serving_softirq() && unlikely(bl > 100)) {
2267 		long rrn = READ_ONCE(rcu_resched_ns);
2268 
2269 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2270 		tlimit = local_clock() + rrn;
2271 	}
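	/*
	 * Worked example (illustrative, using the common defaults of
	 * blimit = 10 and rcu_divisor = 7): with 25600 pending callbacks,
	 * bl becomes max(10, 25600 >> 7) = 200.  Because bl > 100 in
	 * softirq context, tlimit also caps this batch at roughly
	 * rcu_resched_ns (about 3 ms by default) of wall-clock time.
	 */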
2272 	trace_rcu_batch_start(rcu_state.name,
2273 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2274 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2275 	if (rcu_rdp_is_offloaded(rdp))
2276 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2277 
2278 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2279 	rcu_nocb_unlock_irqrestore(rdp, flags);
2280 
2281 	/* Invoke callbacks. */
2282 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2283 	rhp = rcu_cblist_dequeue(&rcl);
2284 
2285 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2286 		rcu_callback_t f;
2287 
2288 		count++;
2289 		debug_rcu_head_unqueue(rhp);
2290 
2291 		rcu_lock_acquire(&rcu_callback_map);
2292 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2293 
2294 		f = rhp->func;
2295 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2296 		f(rhp);
2297 
2298 		rcu_lock_release(&rcu_callback_map);
2299 
2300 		/*
2301 		 * Stop only if limit reached and CPU has something to do.
2302 		 */
2303 		if (in_serving_softirq()) {
2304 			if (count >= bl && (need_resched() || !is_idle_task(current)))
2305 				break;
2306 			/*
2307 			 * Make sure we don't spend too much time here and deprive other
2308 			 * softirq vectors of CPU cycles.
2309 			 */
2310 			if (unlikely(tlimit)) {
2311 				/* only call local_clock() every 32 callbacks */
2312 				if (likely((count & 31) || local_clock() < tlimit))
2313 					continue;
2314 				/* Exceeded the time limit, so leave. */
2315 				break;
2316 			}
2317 		} else {
2318 			local_bh_enable();
2319 			lockdep_assert_irqs_enabled();
2320 			cond_resched_tasks_rcu_qs();
2321 			lockdep_assert_irqs_enabled();
2322 			local_bh_disable();
2323 		}
2324 	}
2325 
2326 	rcu_nocb_lock_irqsave(rdp, flags);
2327 	rdp->n_cbs_invoked += count;
2328 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2329 			    is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2330 
2331 	/* Update counts and requeue any remaining callbacks. */
2332 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2333 	rcu_segcblist_add_len(&rdp->cblist, -count);
2334 
2335 	/* Reinstate batch limit if we have worked down the excess. */
2336 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2337 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2338 		rdp->blimit = blimit;
2339 
2340 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2341 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2342 		rdp->qlen_last_fqs_check = 0;
2343 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2344 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2345 		rdp->qlen_last_fqs_check = count;
2346 
2347 	/*
2348 	 * The following usually indicates a double call_rcu().  To track
2349 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2350 	 */
2351 	empty = rcu_segcblist_empty(&rdp->cblist);
2352 	WARN_ON_ONCE(count == 0 && !empty);
2353 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2354 		     count != 0 && empty);
2355 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2356 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2357 
2358 	rcu_nocb_unlock_irqrestore(rdp, flags);
2359 
2360 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2361 }
2362 
2363 /*
2364  * This function is invoked from each scheduling-clock interrupt,
2365  * and checks to see if this CPU is in a non-context-switch quiescent
2366  * state, for example, user mode or idle loop.  It also schedules RCU
2367  * core processing.  If the current grace period has gone on too long,
2368  * it will ask the scheduler to manufacture a context switch for the sole
2369  * purpose of providing the needed quiescent state.
2370  */
2371 void rcu_sched_clock_irq(int user)
2372 {
2373 	unsigned long j;
2374 
2375 	if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2376 		j = jiffies;
2377 		WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2378 		__this_cpu_write(rcu_data.last_sched_clock, j);
2379 	}
2380 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2381 	lockdep_assert_irqs_disabled();
2382 	raw_cpu_inc(rcu_data.ticks_this_gp);
2383 	/* The load-acquire pairs with the store-release setting to true. */
2384 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2385 		/* Idle and userspace execution already are quiescent states. */
2386 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2387 			set_tsk_need_resched(current);
2388 			set_preempt_need_resched();
2389 		}
2390 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2391 	}
2392 	rcu_flavor_sched_clock_irq(user);
2393 	if (rcu_pending(user))
2394 		invoke_rcu_core();
2395 	if (user || rcu_is_cpu_rrupt_from_idle())
2396 		rcu_note_voluntary_context_switch(current);
2397 	lockdep_assert_irqs_disabled();
2398 
2399 	trace_rcu_utilization(TPS("End scheduler-tick"));
2400 }
2401 
2402 /*
2403  * Scan the leaf rcu_node structures.  For each structure on which all
2404  * CPUs have reported a quiescent state and on which there are tasks
2405  * blocking the current grace period, initiate RCU priority boosting.
2406  * Otherwise, invoke the specified function to check dyntick state for
2407  * each CPU that has not yet reported a quiescent state.
2408  */
2409 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2410 {
2411 	int cpu;
2412 	unsigned long flags;
2413 	unsigned long mask;
2414 	struct rcu_data *rdp;
2415 	struct rcu_node *rnp;
2416 
2417 	rcu_state.cbovld = rcu_state.cbovldnext;
2418 	rcu_state.cbovldnext = false;
2419 	rcu_for_each_leaf_node(rnp) {
2420 		cond_resched_tasks_rcu_qs();
2421 		mask = 0;
2422 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2423 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2424 		if (rnp->qsmask == 0) {
2425 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2426 				/*
2427 				 * No point in scanning bits because they
2428 				 * are all zero.  But we might need to
2429 				 * priority-boost blocked readers.
2430 				 */
2431 				rcu_initiate_boost(rnp, flags);
2432 				/* rcu_initiate_boost() releases rnp->lock */
2433 				continue;
2434 			}
2435 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2436 			continue;
2437 		}
2438 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2439 			rdp = per_cpu_ptr(&rcu_data, cpu);
2440 			if (f(rdp)) {
2441 				mask |= rdp->grpmask;
2442 				rcu_disable_urgency_upon_qs(rdp);
2443 			}
2444 		}
2445 		if (mask != 0) {
2446 			/* Idle/offline CPUs, report (releases rnp->lock). */
2447 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2448 		} else {
2449 			/* Nothing to do here, so just drop the lock. */
2450 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2451 		}
2452 	}
2453 }
2454 
2455 /*
2456  * Force quiescent states on reluctant CPUs, and also detect which
2457  * CPUs are in dyntick-idle mode.
2458  */
2459 void rcu_force_quiescent_state(void)
2460 {
2461 	unsigned long flags;
2462 	bool ret;
2463 	struct rcu_node *rnp;
2464 	struct rcu_node *rnp_old = NULL;
2465 
2466 	/* Funnel through hierarchy to reduce memory contention. */
2467 	rnp = raw_cpu_read(rcu_data.mynode);
2468 	for (; rnp != NULL; rnp = rnp->parent) {
2469 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2470 		       !raw_spin_trylock(&rnp->fqslock);
2471 		if (rnp_old != NULL)
2472 			raw_spin_unlock(&rnp_old->fqslock);
2473 		if (ret)
2474 			return;
2475 		rnp_old = rnp;
2476 	}
2477 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2478 
2479 	/* Reached the root of the rcu_node tree, acquire lock. */
2480 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2481 	raw_spin_unlock(&rnp_old->fqslock);
2482 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2483 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2484 		return;  /* Someone beat us to it. */
2485 	}
2486 	WRITE_ONCE(rcu_state.gp_flags,
2487 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2488 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2489 	rcu_gp_kthread_wake();
2490 }
2491 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
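/*
 * Illustrative note on the funnel above: with, say, four leaf rcu_node
 * structures, at most one CPU per leaf wins its leaf's ->fqslock and moves
 * upward, so no more than four CPUs ever contend for the root ->fqslock no
 * matter how many CPUs call rcu_force_quiescent_state() concurrently; every
 * other caller returns early because its trylock failed or because the
 * RCU_GP_FLAG_FQS flag was already set.
 */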
2492 
2493 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2494 // grace periods.
2495 static void strict_work_handler(struct work_struct *work)
2496 {
2497 	rcu_read_lock();
2498 	rcu_read_unlock();
2499 }
2500 
2501 /* Perform RCU core processing work for the current CPU.  */
2502 static __latent_entropy void rcu_core(void)
2503 {
2504 	unsigned long flags;
2505 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2506 	struct rcu_node *rnp = rdp->mynode;
2507 	/*
2508 	 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2509 	 * Therefore this function can race with concurrent NOCB (de-)offloading
2510 	 * on this CPU and the below condition must be considered volatile.
2511 	 * However if we race with:
2512 	 *
2513 	 * _ Offloading:   In the worst case we accelerate or process callbacks
2514 	 *                 concurrently with NOCB kthreads. We are guaranteed to
2515 	 *                 call rcu_nocb_lock() if that happens.
2516 	 *
2517 	 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2518 	 *                 processing. This is fine because the early stage
2519 	 *                 of deoffloading invokes rcu_core() after setting
2520 	 *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2521 	 *                 what could have been dismissed without the need to wait
2522 	 *                 for the next rcu_pending() check in the next jiffy.
2523 	 */
2524 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2525 
2526 	if (cpu_is_offline(smp_processor_id()))
2527 		return;
2528 	trace_rcu_utilization(TPS("Start RCU core"));
2529 	WARN_ON_ONCE(!rdp->beenonline);
2530 
2531 	/* Report any deferred quiescent states if preemption enabled. */
2532 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2533 		rcu_preempt_deferred_qs(current);
2534 	} else if (rcu_preempt_need_deferred_qs(current)) {
2535 		set_tsk_need_resched(current);
2536 		set_preempt_need_resched();
2537 	}
2538 
2539 	/* Update RCU state based on any recent quiescent states. */
2540 	rcu_check_quiescent_state(rdp);
2541 
2542 	/* No grace period and unregistered callbacks? */
2543 	if (!rcu_gp_in_progress() &&
2544 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2545 		rcu_nocb_lock_irqsave(rdp, flags);
2546 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2547 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2548 		rcu_nocb_unlock_irqrestore(rdp, flags);
2549 	}
2550 
2551 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2552 
2553 	/* If there are callbacks ready, invoke them. */
2554 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2555 	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
2556 		rcu_do_batch(rdp);
2557 		/* Re-invoke RCU core processing if there are callbacks remaining. */
2558 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
2559 			invoke_rcu_core();
2560 	}
2561 
2562 	/* Do any needed deferred wakeups of rcuo kthreads. */
2563 	do_nocb_deferred_wakeup(rdp);
2564 	trace_rcu_utilization(TPS("End RCU core"));
2565 
2566 	// If strict GPs, schedule an RCU reader in a clean environment.
2567 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2568 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2569 }
2570 
2571 static void rcu_core_si(struct softirq_action *h)
2572 {
2573 	rcu_core();
2574 }
2575 
2576 static void rcu_wake_cond(struct task_struct *t, int status)
2577 {
2578 	/*
2579 	 * If the thread is yielding, only wake it when this
2580 	 * is invoked from idle
2581 	 */
2582 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2583 		wake_up_process(t);
2584 }
2585 
2586 static void invoke_rcu_core_kthread(void)
2587 {
2588 	struct task_struct *t;
2589 	unsigned long flags;
2590 
2591 	local_irq_save(flags);
2592 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2593 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2594 	if (t != NULL && t != current)
2595 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2596 	local_irq_restore(flags);
2597 }
2598 
2599 /*
2600  * Wake up this CPU's rcuc kthread to do RCU core processing.
2601  */
2602 static void invoke_rcu_core(void)
2603 {
2604 	if (!cpu_online(smp_processor_id()))
2605 		return;
2606 	if (use_softirq)
2607 		raise_softirq(RCU_SOFTIRQ);
2608 	else
2609 		invoke_rcu_core_kthread();
2610 }
2611 
2612 static void rcu_cpu_kthread_park(unsigned int cpu)
2613 {
2614 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2615 }
2616 
2617 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2618 {
2619 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2620 }
2621 
2622 /*
2623  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2624  * the RCU softirq used in configurations of RCU that do not support RCU
2625  * priority boosting.
2626  */
2627 static void rcu_cpu_kthread(unsigned int cpu)
2628 {
2629 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2630 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2631 	unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2632 	int spincnt;
2633 
2634 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2635 	for (spincnt = 0; spincnt < 10; spincnt++) {
2636 		WRITE_ONCE(*j, jiffies);
2637 		local_bh_disable();
2638 		*statusp = RCU_KTHREAD_RUNNING;
2639 		local_irq_disable();
2640 		work = *workp;
2641 		*workp = 0;
2642 		local_irq_enable();
2643 		if (work)
2644 			rcu_core();
2645 		local_bh_enable();
2646 		if (*workp == 0) {
2647 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2648 			*statusp = RCU_KTHREAD_WAITING;
2649 			return;
2650 		}
2651 	}
2652 	*statusp = RCU_KTHREAD_YIELDING;
2653 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2654 	schedule_timeout_idle(2);
2655 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2656 	*statusp = RCU_KTHREAD_WAITING;
2657 	WRITE_ONCE(*j, jiffies);
2658 }
2659 
2660 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2661 	.store			= &rcu_data.rcu_cpu_kthread_task,
2662 	.thread_should_run	= rcu_cpu_kthread_should_run,
2663 	.thread_fn		= rcu_cpu_kthread,
2664 	.thread_comm		= "rcuc/%u",
2665 	.setup			= rcu_cpu_kthread_setup,
2666 	.park			= rcu_cpu_kthread_park,
2667 };
2668 
2669 /*
2670  * Spawn per-CPU RCU core processing kthreads.
2671  */
2672 static int __init rcu_spawn_core_kthreads(void)
2673 {
2674 	int cpu;
2675 
2676 	for_each_possible_cpu(cpu)
2677 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2678 	if (use_softirq)
2679 		return 0;
2680 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2681 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2682 	return 0;
2683 }
2684 
2685 /*
2686  * Handle any core-RCU processing required by a call_rcu() invocation.
2687  */
2688 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2689 			    unsigned long flags)
2690 {
2691 	/*
2692 	 * If called from an extended quiescent state, invoke the RCU
2693 	 * core in order to force a re-evaluation of RCU's idleness.
2694 	 */
2695 	if (!rcu_is_watching())
2696 		invoke_rcu_core();
2697 
2698 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2699 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2700 		return;
2701 
2702 	/*
2703 	 * Force the grace period if too many callbacks or too long waiting.
2704 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2705 	 * if some other CPU has recently done so.  Also, don't bother
2706 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2707 	 * is the only one waiting for a grace period to complete.
2708 	 */
2709 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2710 		     rdp->qlen_last_fqs_check + qhimark)) {
2711 
2712 		/* Are we ignoring a completed grace period? */
2713 		note_gp_changes(rdp);
2714 
2715 		/* Start a new grace period if one not already started. */
2716 		if (!rcu_gp_in_progress()) {
2717 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2718 		} else {
2719 			/* Give the grace period a kick. */
2720 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2721 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2722 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2723 				rcu_force_quiescent_state();
2724 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2725 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2726 		}
2727 	}
2728 }
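/*
 * Hysteresis example for the check above (illustrative, assuming the common
 * qhimark default of 10000): the forcing path runs only once more than
 * 10000 callbacks have been queued beyond ->qlen_last_fqs_check.  That
 * snapshot is then advanced to the current callback count, so roughly
 * another 10000 callbacks must accumulate before the grace period is
 * kicked again.
 */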
2729 
2730 /*
2731  * RCU callback function to leak a callback.
2732  */
2733 static void rcu_leak_callback(struct rcu_head *rhp)
2734 {
2735 }
2736 
2737 /*
2738  * Check and if necessary update the leaf rcu_node structure's
2739  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2740  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2741  * structure's ->lock.
2742  */
2743 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2744 {
2745 	raw_lockdep_assert_held_rcu_node(rnp);
2746 	if (qovld_calc <= 0)
2747 		return; // Early boot and wildcard value set.
2748 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2749 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2750 	else
2751 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2752 }
2753 
2754 /*
2755  * Check and if necessary update the leaf rcu_node structure's
2756  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2757  * number of queued RCU callbacks.  No locks need be held, but the
2758  * caller must have disabled interrupts.
2759  *
2760  * Note that this function ignores the possibility that there are a lot
2761  * of callbacks all of which have already seen the end of their respective
2762  * grace periods.  This omission is due to the need for no-CBs CPUs to
2763  * be holding ->nocb_lock to do this check, which is too heavy for a
2764  * common-case operation.
2765  */
2766 static void check_cb_ovld(struct rcu_data *rdp)
2767 {
2768 	struct rcu_node *const rnp = rdp->mynode;
2769 
2770 	if (qovld_calc <= 0 ||
2771 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2772 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2773 		return; // Early boot wildcard value or already set correctly.
2774 	raw_spin_lock_rcu_node(rnp);
2775 	check_cb_ovld_locked(rdp, rnp);
2776 	raw_spin_unlock_rcu_node(rnp);
2777 }
2778 
2779 static void
2780 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
2781 {
2782 	static atomic_t doublefrees;
2783 	unsigned long flags;
2784 	bool lazy;
2785 	struct rcu_data *rdp;
2786 	bool was_alldone;
2787 
2788 	/* Misaligned rcu_head! */
2789 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2790 
2791 	if (debug_rcu_head_queue(head)) {
2792 		/*
2793 		 * Probable double call_rcu(), so leak the callback.
2794 		 * Use rcu:rcu_callback trace event to find the previous
2795 		 * time callback was passed to call_rcu().
2796 		 */
2797 		if (atomic_inc_return(&doublefrees) < 4) {
2798 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2799 			mem_dump_obj(head);
2800 		}
2801 		WRITE_ONCE(head->func, rcu_leak_callback);
2802 		return;
2803 	}
2804 	head->func = func;
2805 	head->next = NULL;
2806 	kasan_record_aux_stack_noalloc(head);
2807 	local_irq_save(flags);
2808 	rdp = this_cpu_ptr(&rcu_data);
2809 	lazy = lazy_in && !rcu_async_should_hurry();
2810 
2811 	/* Add the callback to our list. */
2812 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2813 		// This can trigger due to call_rcu() from offline CPU:
2814 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2815 		WARN_ON_ONCE(!rcu_is_watching());
2816 		// Very early boot, before rcu_init().  Initialize if needed
2817 		// and then drop through to queue the callback.
2818 		if (rcu_segcblist_empty(&rdp->cblist))
2819 			rcu_segcblist_init(&rdp->cblist);
2820 	}
2821 
2822 	check_cb_ovld(rdp);
2823 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2824 		return; // Enqueued onto ->nocb_bypass, so just leave.
2825 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2826 	rcu_segcblist_enqueue(&rdp->cblist, head);
2827 	if (__is_kvfree_rcu_offset((unsigned long)func))
2828 		trace_rcu_kvfree_callback(rcu_state.name, head,
2829 					 (unsigned long)func,
2830 					 rcu_segcblist_n_cbs(&rdp->cblist));
2831 	else
2832 		trace_rcu_callback(rcu_state.name, head,
2833 				   rcu_segcblist_n_cbs(&rdp->cblist));
2834 
2835 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2836 
2837 	/* Go handle any RCU core processing required. */
2838 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2839 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2840 	} else {
2841 		__call_rcu_core(rdp, head, flags);
2842 		local_irq_restore(flags);
2843 	}
2844 }
2845 
2846 #ifdef CONFIG_RCU_LAZY
2847 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
2848 module_param(enable_rcu_lazy, bool, 0444);
2849 
2850 /**
2851  * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2852  * flush all lazy callbacks (including the new one) to the main ->cblist while
2853  * doing so.
2854  *
2855  * @head: structure to be used for queueing the RCU updates.
2856  * @func: actual callback function to be invoked after the grace period
2857  *
2858  * The callback function will be invoked some time after a full grace
2859  * period elapses, in other words after all pre-existing RCU read-side
2860  * critical sections have completed.
2861  *
2862  * Use this API instead of call_rcu() if you don't want the callback to be
2863  * invoked after very long periods of time, which can happen on systems without
2864  * memory pressure and on systems which are lightly loaded or mostly idle.
2865  * This function will cause callbacks to be invoked sooner than later at the
2866  * expense of extra power. Other than that, this function is identical to, and
2867  * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2868  * ordering and other functionality.
2869  */
2870 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2871 {
2872 	return __call_rcu_common(head, func, false);
2873 }
2874 EXPORT_SYMBOL_GPL(call_rcu_hurry);
2875 #else
2876 #define enable_rcu_lazy		false
2877 #endif
2878 
2879 /**
2880  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2881  * By default the callbacks are 'lazy' and are kept hidden from the main
2882  * ->cblist to prevent starting of grace periods too soon.
2883  * If you desire grace periods to start very soon, use call_rcu_hurry().
2884  *
2885  * @head: structure to be used for queueing the RCU updates.
2886  * @func: actual callback function to be invoked after the grace period
2887  *
2888  * The callback function will be invoked some time after a full grace
2889  * period elapses, in other words after all pre-existing RCU read-side
2890  * critical sections have completed.  However, the callback function
2891  * might well execute concurrently with RCU read-side critical sections
2892  * that started after call_rcu() was invoked.
2893  *
2894  * RCU read-side critical sections are delimited by rcu_read_lock()
2895  * and rcu_read_unlock(), and may be nested.  In addition, but only in
2896  * v5.0 and later, regions of code across which interrupts, preemption,
2897  * or softirqs have been disabled also serve as RCU read-side critical
2898  * sections.  This includes hardware interrupt handlers, softirq handlers,
2899  * and NMI handlers.
2900  *
2901  * Note that all CPUs must agree that the grace period extended beyond
2902  * all pre-existing RCU read-side critical sections.  On systems with more
2903  * than one CPU, this means that when "func()" is invoked, each CPU is
2904  * guaranteed to have executed a full memory barrier since the end of its
2905  * last RCU read-side critical section whose beginning preceded the call
2906  * to call_rcu().  It also means that each CPU executing an RCU read-side
2907  * critical section that continues beyond the start of "func()" must have
2908  * executed a memory barrier after the call_rcu() but before the beginning
2909  * of that RCU read-side critical section.  Note that these guarantees
2910  * include CPUs that are offline, idle, or executing in user mode, as
2911  * well as CPUs that are executing in the kernel.
2912  *
2913  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2914  * resulting RCU callback function "func()", then both CPU A and CPU B are
2915  * guaranteed to execute a full memory barrier during the time interval
2916  * between the call to call_rcu() and the invocation of "func()" -- even
2917  * if CPU A and CPU B are the same CPU (but again only if the system has
2918  * more than one CPU).
2919  *
2920  * Implementation of these memory-ordering guarantees is described here:
2921  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2922  */
2923 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2924 {
2925 	__call_rcu_common(head, func, enable_rcu_lazy);
2926 }
2927 EXPORT_SYMBOL_GPL(call_rcu);
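/*
 * Usage sketch (illustrative only; "struct foo" and its helpers are
 * hypothetical, not part of this file): the classical remove-then-free
 * pattern.  The updater unlinks the element while readers may still hold
 * references obtained under rcu_read_lock(), and call_rcu() defers the
 * actual kfree() until all such pre-existing readers have finished.
 */
struct foo {
	struct list_head list;
	struct rcu_head rh;
	int key;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

/* Caller holds the update-side lock protecting the list. */
static void foo_del(struct foo *fp)
{
	list_del_rcu(&fp->list);	/* New readers can no longer find fp... */
	call_rcu(&fp->rh, foo_reclaim);	/* ...and it is freed after a GP. */
}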
2928 
2929 /* Maximum number of jiffies to wait before draining a batch. */
2930 #define KFREE_DRAIN_JIFFIES (5 * HZ)
2931 #define KFREE_N_BATCHES 2
2932 #define FREE_N_CHANNELS 2
2933 
2934 /**
2935  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2936  * @nr_records: Number of active pointers in the array
2937  * @next: Next bulk object in the block chain
2938  * @records: Array of the kvfree_rcu() pointers
2939  */
2940 struct kvfree_rcu_bulk_data {
2941 	unsigned long nr_records;
2942 	struct kvfree_rcu_bulk_data *next;
2943 	void *records[];
2944 };
2945 
2946 /*
2947  * This macro defines how many entries the "records" array
2948  * will contain. It is chosen so that a fully-populated
2949  * kvfree_rcu_bulk_data structure occupies exactly one page.
2950  */
2951 #define KVFREE_BULK_MAX_ENTR \
2952 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
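/*
 * Illustrative sizing example (assumes a 4 KiB PAGE_SIZE and 8-byte
 * pointers; actual values are architecture-dependent): the header of
 * struct kvfree_rcu_bulk_data occupies 16 bytes, so KVFREE_BULK_MAX_ENTR
 * evaluates to (4096 - 16) / 8 = 510, and a fully-populated block plus
 * its header fills exactly one page.
 */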
2953 
2954 /**
2955  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2956  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2957  * @head_free: List of kfree_rcu() objects waiting for a grace period
2958  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2959  * @krcp: Pointer to @kfree_rcu_cpu structure
2960  */
2961 
2962 struct kfree_rcu_cpu_work {
2963 	struct rcu_work rcu_work;
2964 	struct rcu_head *head_free;
2965 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
2966 	struct kfree_rcu_cpu *krcp;
2967 };
2968 
2969 /**
2970  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2971  * @head: List of kfree_rcu() objects not yet waiting for a grace period
2972  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2973  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2974  * @lock: Synchronize access to this structure
2975  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2976  * @initialized: The @rcu_work fields have been initialized
2977  * @count: Number of objects for which GP not started
2978  * @bkvcache:
2979  *	A simple cache list that contains objects for reuse purposes.
2980  *	In order to save some per-cpu space the list is singular.
2981  *	Even though it is lockless, accesses have to be protected by the
2982  *	per-cpu lock.
2983  * @page_cache_work: A work to refill the cache when it is empty
2984  * @backoff_page_cache_fill: Delay cache refills
2985  * @work_in_progress: Indicates that page_cache_work is running
2986  * @hrtimer: A hrtimer for scheduling a page_cache_work
2987  * @nr_bkv_objs: number of allocated objects at @bkvcache.
2988  *
2989  * This is a per-CPU structure.  The reason that it is not included in
2990  * the rcu_data structure is to permit this code to be extracted from
2991  * the RCU files.  Such extraction could allow further optimization of
2992  * the interactions with the slab allocators.
2993  */
2994 struct kfree_rcu_cpu {
2995 	struct rcu_head *head;
2996 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
2997 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2998 	raw_spinlock_t lock;
2999 	struct delayed_work monitor_work;
3000 	bool initialized;
3001 	int count;
3002 
3003 	struct delayed_work page_cache_work;
3004 	atomic_t backoff_page_cache_fill;
3005 	atomic_t work_in_progress;
3006 	struct hrtimer hrtimer;
3007 
3008 	struct llist_head bkvcache;
3009 	int nr_bkv_objs;
3010 };
3011 
3012 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3013 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3014 };
3015 
3016 static __always_inline void
3017 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3018 {
3019 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3020 	int i;
3021 
3022 	for (i = 0; i < bhead->nr_records; i++)
3023 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3024 #endif
3025 }
3026 
3027 static inline struct kfree_rcu_cpu *
3028 krc_this_cpu_lock(unsigned long *flags)
3029 {
3030 	struct kfree_rcu_cpu *krcp;
3031 
3032 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3033 	krcp = this_cpu_ptr(&krc);
3034 	raw_spin_lock(&krcp->lock);
3035 
3036 	return krcp;
3037 }
3038 
3039 static inline void
3040 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3041 {
3042 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3043 }
3044 
3045 static inline struct kvfree_rcu_bulk_data *
3046 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3047 {
3048 	if (!krcp->nr_bkv_objs)
3049 		return NULL;
3050 
3051 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
3052 	return (struct kvfree_rcu_bulk_data *)
3053 		llist_del_first(&krcp->bkvcache);
3054 }
3055 
3056 static inline bool
3057 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3058 	struct kvfree_rcu_bulk_data *bnode)
3059 {
3060 	// Check the limit.
3061 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3062 		return false;
3063 
3064 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3065 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3066 	return true;
3067 }
3068 
3069 static int
3070 drain_page_cache(struct kfree_rcu_cpu *krcp)
3071 {
3072 	unsigned long flags;
3073 	struct llist_node *page_list, *pos, *n;
3074 	int freed = 0;
3075 
3076 	raw_spin_lock_irqsave(&krcp->lock, flags);
3077 	page_list = llist_del_all(&krcp->bkvcache);
3078 	WRITE_ONCE(krcp->nr_bkv_objs, 0);
3079 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3080 
3081 	llist_for_each_safe(pos, n, page_list) {
3082 		free_page((unsigned long)pos);
3083 		freed++;
3084 	}
3085 
3086 	return freed;
3087 }
3088 
3089 /*
3090  * This function is invoked in workqueue context after a grace period.
3091  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3092  */
3093 static void kfree_rcu_work(struct work_struct *work)
3094 {
3095 	unsigned long flags;
3096 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3097 	struct rcu_head *head, *next;
3098 	struct kfree_rcu_cpu *krcp;
3099 	struct kfree_rcu_cpu_work *krwp;
3100 	int i, j;
3101 
3102 	krwp = container_of(to_rcu_work(work),
3103 			    struct kfree_rcu_cpu_work, rcu_work);
3104 	krcp = krwp->krcp;
3105 
3106 	raw_spin_lock_irqsave(&krcp->lock, flags);
3107 	// Channels 1 and 2.
3108 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3109 		bkvhead[i] = krwp->bkvhead_free[i];
3110 		krwp->bkvhead_free[i] = NULL;
3111 	}
3112 
3113 	// Channel 3.
3114 	head = krwp->head_free;
3115 	krwp->head_free = NULL;
3116 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3117 
3118 	// Handle the first two channels.
3119 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3120 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3121 			bnext = bkvhead[i]->next;
3122 			debug_rcu_bhead_unqueue(bkvhead[i]);
3123 
3124 			rcu_lock_acquire(&rcu_callback_map);
3125 			if (i == 0) { // kmalloc() / kfree().
3126 				trace_rcu_invoke_kfree_bulk_callback(
3127 					rcu_state.name, bkvhead[i]->nr_records,
3128 					bkvhead[i]->records);
3129 
3130 				kfree_bulk(bkvhead[i]->nr_records,
3131 					bkvhead[i]->records);
3132 			} else { // vmalloc() / vfree().
3133 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3134 					trace_rcu_invoke_kvfree_callback(
3135 						rcu_state.name,
3136 						bkvhead[i]->records[j], 0);
3137 
3138 					vfree(bkvhead[i]->records[j]);
3139 				}
3140 			}
3141 			rcu_lock_release(&rcu_callback_map);
3142 
3143 			raw_spin_lock_irqsave(&krcp->lock, flags);
3144 			if (put_cached_bnode(krcp, bkvhead[i]))
3145 				bkvhead[i] = NULL;
3146 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3147 
3148 			if (bkvhead[i])
3149 				free_page((unsigned long) bkvhead[i]);
3150 
3151 			cond_resched_tasks_rcu_qs();
3152 		}
3153 	}
3154 
3155 	/*
3156 	 * This is used when the "bulk" path cannot be used for the
3157 	 * double-argument variant of kvfree_rcu().  This happens when
3158 	 * the page cache is empty, which means that objects are instead
3159 	 * queued on a linked list through their rcu_head structures.
3160 	 * This list is named "Channel 3".
3161 	 */
3162 	for (; head; head = next) {
3163 		unsigned long offset = (unsigned long)head->func;
3164 		void *ptr = (void *)head - offset;
3165 
3166 		next = head->next;
3167 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3168 		rcu_lock_acquire(&rcu_callback_map);
3169 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3170 
3171 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3172 			kvfree(ptr);
3173 
3174 		rcu_lock_release(&rcu_callback_map);
3175 		cond_resched_tasks_rcu_qs();
3176 	}
3177 }
3178 
3179 static bool
3180 need_offload_krc(struct kfree_rcu_cpu *krcp)
3181 {
3182 	int i;
3183 
3184 	for (i = 0; i < FREE_N_CHANNELS; i++)
3185 		if (krcp->bkvhead[i])
3186 			return true;
3187 
3188 	return !!krcp->head;
3189 }
3190 
3191 static bool
3192 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3193 {
3194 	int i;
3195 
3196 	for (i = 0; i < FREE_N_CHANNELS; i++)
3197 		if (krwp->bkvhead_free[i])
3198 			return true;
3199 
3200 	return !!krwp->head_free;
3201 }
3202 
3203 static void
3204 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3205 {
3206 	long delay, delay_left;
3207 
3208 	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3209 	if (delayed_work_pending(&krcp->monitor_work)) {
3210 		delay_left = krcp->monitor_work.timer.expires - jiffies;
3211 		if (delay < delay_left)
3212 			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3213 		return;
3214 	}
3215 	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3216 }
3217 
3218 /*
3219  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3220  */
3221 static void kfree_rcu_monitor(struct work_struct *work)
3222 {
3223 	struct kfree_rcu_cpu *krcp = container_of(work,
3224 		struct kfree_rcu_cpu, monitor_work.work);
3225 	unsigned long flags;
3226 	int i, j;
3227 
3228 	raw_spin_lock_irqsave(&krcp->lock, flags);
3229 
3230 	// Attempt to start a new batch.
3231 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3232 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3233 
3234 		// Try to detach bkvhead or head and attach it to krwp, but
3235 		// only when all channels are free.  If any channel is busy,
3236 		// there is still in-flight RCU work handling krwp's objects.
3237 		if (need_wait_for_krwp_work(krwp))
3238 			continue;
3239 
3240 		if (need_offload_krc(krcp)) {
3241 			// Channel 1 corresponds to the SLAB-pointer bulk path.
3242 			// Channel 2 corresponds to vmalloc-pointer bulk path.
3243 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3244 				if (!krwp->bkvhead_free[j]) {
3245 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3246 					krcp->bkvhead[j] = NULL;
3247 				}
3248 			}
3249 
3250 			// Channel 3 corresponds to both SLAB and vmalloc
3251 			// objects queued on the linked list.
3252 			if (!krwp->head_free) {
3253 				krwp->head_free = krcp->head;
3254 				krcp->head = NULL;
3255 			}
3256 
3257 			WRITE_ONCE(krcp->count, 0);
3258 
3259 			// One work item is used per batch, so each batch can
3260 			// handle up to three "free channels".  Note that the
3261 			// work may already be in the pending state when the
3262 			// channels have been detached one after the
3263 			// other.
3264 			queue_rcu_work(system_wq, &krwp->rcu_work);
3265 		}
3266 	}
3267 
3268 	// If there is nothing left to detach, our job here is
3269 	// successfully done. If at least one of the channels is
3270 	// still busy, we should rearm the work to repeat the
3271 	// attempt later, because the previous batches are still
3272 	// in progress.
3273 	if (need_offload_krc(krcp))
3274 		schedule_delayed_monitor_work(krcp);
3275 
3276 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3277 }
3278 
3279 static enum hrtimer_restart
3280 schedule_page_work_fn(struct hrtimer *t)
3281 {
3282 	struct kfree_rcu_cpu *krcp =
3283 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3284 
3285 	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3286 	return HRTIMER_NORESTART;
3287 }
3288 
3289 static void fill_page_cache_func(struct work_struct *work)
3290 {
3291 	struct kvfree_rcu_bulk_data *bnode;
3292 	struct kfree_rcu_cpu *krcp =
3293 		container_of(work, struct kfree_rcu_cpu,
3294 			page_cache_work.work);
3295 	unsigned long flags;
3296 	int nr_pages;
3297 	bool pushed;
3298 	int i;
3299 
3300 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3301 		1 : rcu_min_cached_objs;
3302 
3303 	for (i = 0; i < nr_pages; i++) {
3304 		bnode = (struct kvfree_rcu_bulk_data *)
3305 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3306 
3307 		if (!bnode)
3308 			break;
3309 
3310 		raw_spin_lock_irqsave(&krcp->lock, flags);
3311 		pushed = put_cached_bnode(krcp, bnode);
3312 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3313 
3314 		if (!pushed) {
3315 			free_page((unsigned long) bnode);
3316 			break;
3317 		}
3318 	}
3319 
3320 	atomic_set(&krcp->work_in_progress, 0);
3321 	atomic_set(&krcp->backoff_page_cache_fill, 0);
3322 }
3323 
3324 static void
3325 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3326 {
3327 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3328 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3329 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
3330 			queue_delayed_work(system_wq,
3331 				&krcp->page_cache_work,
3332 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3333 		} else {
3334 			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3335 			krcp->hrtimer.function = schedule_page_work_fn;
3336 			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3337 		}
3338 	}
3339 }
3340 
3341 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3342 // state specified by flags.  If can_alloc is true, the caller must
3343 // be schedulable and not be holding any locks or mutexes that might be
3344 // acquired by the memory allocator or anything that it might invoke.
3345 // Returns true if ptr was successfully recorded, else the caller must
3346 // use a fallback.
3347 static inline bool
3348 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3349 	unsigned long *flags, void *ptr, bool can_alloc)
3350 {
3351 	struct kvfree_rcu_bulk_data *bnode;
3352 	int idx;
3353 
3354 	*krcp = krc_this_cpu_lock(flags);
3355 	if (unlikely(!(*krcp)->initialized))
3356 		return false;
3357 
3358 	idx = !!is_vmalloc_addr(ptr);
3359 
3360 	/* Check if a new block is required. */
3361 	if (!(*krcp)->bkvhead[idx] ||
3362 			(*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3363 		bnode = get_cached_bnode(*krcp);
3364 		if (!bnode && can_alloc) {
3365 			krc_this_cpu_unlock(*krcp, *flags);
3366 
3367 			// __GFP_NORETRY - allows a light-weight direct reclaim,
3368 			// which is fine here because it minimizes how often the
3369 			// fallback path is hit.  It also forbids any OOM-killer
3370 			// invocation, which is beneficial because memory is
3371 			// about to be released soon anyway.
3372 			//
3373 			// __GFP_NOMEMALLOC - prevents consuming all of the
3374 			// memory reserves.  Please note we have a fallback path.
3375 			//
3376 			// __GFP_NOWARN - the allocation is expected to fail under
3377 			// low-memory or high-memory-pressure scenarios.
3378 			bnode = (struct kvfree_rcu_bulk_data *)
3379 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3380 			*krcp = krc_this_cpu_lock(flags);
3381 		}
3382 
3383 		if (!bnode)
3384 			return false;
3385 
3386 		/* Initialize the new block. */
3387 		bnode->nr_records = 0;
3388 		bnode->next = (*krcp)->bkvhead[idx];
3389 
3390 		/* Attach it to the head. */
3391 		(*krcp)->bkvhead[idx] = bnode;
3392 	}
3393 
3394 	/* Finally insert. */
3395 	(*krcp)->bkvhead[idx]->records
3396 		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3397 
3398 	return true;
3399 }
3400 
3401 /*
3402  * Queue a request for lazy invocation of the appropriate free routine
3403  * after a grace period.  Please note that three paths are maintained,
3404  * two for the common case using arrays of pointers and a third one that
3405  * is used only when the main paths cannot be used, for example, due to
3406  * memory pressure.
3407  *
3408  * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3409  * every KFREE_DRAIN_JIFFIES jiffies. All the objects in the batch will be
3410  * freed in workqueue context. Batching requests together reduces the number
3411  * of grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3412  */
3413 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3414 {
3415 	unsigned long flags;
3416 	struct kfree_rcu_cpu *krcp;
3417 	bool success;
3418 	void *ptr;
3419 
3420 	if (head) {
3421 		ptr = (void *) head - (unsigned long) func;
3422 	} else {
3423 		/*
3424 		 * Please note that the head-less variant has a
3425 		 * limitation, hence the clear rule for such objects:
3426 		 * it may be used only from a context where sleeping
3427 		 * is allowed (might_sleep()).  For other places, please
3428 		 * embed an rcu_head into your data.
3429 		 */
3430 		might_sleep();
3431 		ptr = (unsigned long *) func;
3432 	}
3433 
3434 	// Queue the object but don't yet schedule the batch.
3435 	if (debug_rcu_head_queue(ptr)) {
3436 		// Probable double kfree_rcu(), just leak.
3437 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3438 			  __func__, head);
3439 
3440 		// Mark as success and leave.
3441 		return;
3442 	}
3443 
3444 	kasan_record_aux_stack_noalloc(ptr);
3445 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3446 	if (!success) {
3447 		run_page_cache_worker(krcp);
3448 
3449 		if (head == NULL)
3450 			// Inline if kvfree_rcu(one_arg) call.
3451 			goto unlock_return;
3452 
3453 		head->func = func;
3454 		head->next = krcp->head;
3455 		krcp->head = head;
3456 		success = true;
3457 	}
3458 
3459 	WRITE_ONCE(krcp->count, krcp->count + 1);
3460 
3461 	/*
3462 	 * The kvfree_rcu() caller considers the pointer freed at this point
3463 	 * and likely removes any references to it. Since the actual slab
3464 	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3465 	 * this object (no scanning or false positives reporting).
3466 	 */
3467 	kmemleak_ignore(ptr);
3468 
3469 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3470 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3471 		schedule_delayed_monitor_work(krcp);
3472 
3473 unlock_return:
3474 	krc_this_cpu_unlock(krcp, flags);
3475 
3476 	/*
3477 	 * Inline kvfree() after synchronize_rcu(). We can do
3478 	 * it from might_sleep() context only, so the current
3479 	 * CPU can pass the QS state.
3480 	 */
3481 	if (!success) {
3482 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3483 		synchronize_rcu();
3484 		kvfree(ptr);
3485 	}
3486 }
3487 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
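/*
 * Usage sketch (illustrative only; "struct bar" is hypothetical): most
 * callers reach kvfree_call_rcu() through the two-argument kfree_rcu()
 * macro, which encodes the offset of the embedded rcu_head as @func so
 * that the object can be freed without writing a dedicated callback.
 */
struct bar {
	int data;
	struct rcu_head rh;
};

static void bar_del(struct bar *bp)
{
	/* Batched per CPU and freed from workqueue context after a GP. */
	kfree_rcu(bp, rh);
}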
3488 
3489 static unsigned long
3490 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3491 {
3492 	int cpu;
3493 	unsigned long count = 0;
3494 
3495 	/* Snapshot count of all CPUs */
3496 	for_each_possible_cpu(cpu) {
3497 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3498 
3499 		count += READ_ONCE(krcp->count);
3500 		count += READ_ONCE(krcp->nr_bkv_objs);
3501 		atomic_set(&krcp->backoff_page_cache_fill, 1);
3502 	}
3503 
3504 	return count == 0 ? SHRINK_EMPTY : count;
3505 }
3506 
3507 static unsigned long
3508 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3509 {
3510 	int cpu, freed = 0;
3511 
3512 	for_each_possible_cpu(cpu) {
3513 		int count;
3514 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3515 
3516 		count = krcp->count;
3517 		count += drain_page_cache(krcp);
3518 		kfree_rcu_monitor(&krcp->monitor_work.work);
3519 
3520 		sc->nr_to_scan -= count;
3521 		freed += count;
3522 
3523 		if (sc->nr_to_scan <= 0)
3524 			break;
3525 	}
3526 
3527 	return freed == 0 ? SHRINK_STOP : freed;
3528 }
3529 
3530 static struct shrinker kfree_rcu_shrinker = {
3531 	.count_objects = kfree_rcu_shrink_count,
3532 	.scan_objects = kfree_rcu_shrink_scan,
3533 	.batch = 0,
3534 	.seeks = DEFAULT_SEEKS,
3535 };
3536 
3537 void __init kfree_rcu_scheduler_running(void)
3538 {
3539 	int cpu;
3540 	unsigned long flags;
3541 
3542 	for_each_possible_cpu(cpu) {
3543 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3544 
3545 		raw_spin_lock_irqsave(&krcp->lock, flags);
3546 		if (need_offload_krc(krcp))
3547 			schedule_delayed_monitor_work(krcp);
3548 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3549 	}
3550 }
3551 
3552 /*
3553  * During early boot, any blocking grace-period wait automatically
3554  * implies a grace period.
3555  *
3556  * Later on, this could in theory be the case for kernels built with
3557  * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3558  * is not a common case.  Furthermore, this optimization would cause
3559  * the rcu_gp_oldstate structure to expand by 50%, so this potential
3560  * grace-period optimization is ignored once the scheduler is running.
3561  */
3562 static int rcu_blocking_is_gp(void)
3563 {
3564 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
3565 		return false;
3566 	might_sleep();  /* Check for RCU read-side critical section. */
3567 	return true;
3568 }
3569 
3570 /**
3571  * synchronize_rcu - wait until a grace period has elapsed.
3572  *
3573  * Control will return to the caller some time after a full grace
3574  * period has elapsed, in other words after all currently executing RCU
3575  * read-side critical sections have completed.  Note, however, that
3576  * upon return from synchronize_rcu(), the caller might well be executing
3577  * concurrently with new RCU read-side critical sections that began while
3578  * synchronize_rcu() was waiting.
3579  *
3580  * RCU read-side critical sections are delimited by rcu_read_lock()
3581  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3582  * v5.0 and later, regions of code across which interrupts, preemption,
3583  * or softirqs have been disabled also serve as RCU read-side critical
3584  * sections.  This includes hardware interrupt handlers, softirq handlers,
3585  * and NMI handlers.
3586  *
3587  * Note that this guarantee implies further memory-ordering guarantees.
3588  * On systems with more than one CPU, when synchronize_rcu() returns,
3589  * each CPU is guaranteed to have executed a full memory barrier since
3590  * the end of its last RCU read-side critical section whose beginning
3591  * preceded the call to synchronize_rcu().  In addition, each CPU having
3592  * an RCU read-side critical section that extends beyond the return from
3593  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3594  * after the beginning of synchronize_rcu() and before the beginning of
3595  * that RCU read-side critical section.  Note that these guarantees include
3596  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3597  * that are executing in the kernel.
3598  *
3599  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3600  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3601  * to have executed a full memory barrier during the execution of
3602  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3603  * again only if the system has more than one CPU).
3604  *
3605  * Implementation of these memory-ordering guarantees is described here:
3606  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3607  */
3608 void synchronize_rcu(void)
3609 {
3610 	unsigned long flags;
3611 	struct rcu_node *rnp;
3612 
3613 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3614 			 lock_is_held(&rcu_lock_map) ||
3615 			 lock_is_held(&rcu_sched_lock_map),
3616 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3617 	if (!rcu_blocking_is_gp()) {
3618 		if (rcu_gp_is_expedited())
3619 			synchronize_rcu_expedited();
3620 		else
3621 			wait_rcu_gp(call_rcu_hurry);
3622 		return;
3623 	}
3624 
3625 	// Context allows vacuous grace periods.
3626 	// Note well that this code runs with !PREEMPT && !SMP.
3627 	// In addition, all code that advances grace periods runs at
3628 	// process level.  Therefore, this normal GP overlaps with other
3629 	// normal GPs only by being fully nested within them, which allows
3630 	// reuse of ->gp_seq_polled_snap.
3631 	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3632 	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3633 
3634 	// Update the normal grace-period counters to record
3635 	// this grace period, but only those used by the boot CPU.
3636 	// The rcu_scheduler_starting() will take care of the rest of
3637 	// these counters.
3638 	local_irq_save(flags);
3639 	WARN_ON_ONCE(num_online_cpus() > 1);
3640 	rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3641 	for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3642 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3643 	local_irq_restore(flags);
3644 }
3645 EXPORT_SYMBOL_GPL(synchronize_rcu);
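/*
 * Usage sketch (illustrative only; "struct cfg" and "gbl_cfg" are
 * hypothetical): a blocking updater publishes a new version of an
 * RCU-protected pointer, waits for all pre-existing readers, and only
 * then frees the old version.
 */
struct cfg {
	int threshold;
};

static struct cfg __rcu *gbl_cfg;

/* Update-side mutual exclusion (e.g., a mutex) is assumed to be held. */
static void cfg_update(struct cfg *newp)
{
	struct cfg *oldp = rcu_dereference_protected(gbl_cfg, 1);

	rcu_assign_pointer(gbl_cfg, newp);
	synchronize_rcu();	/* Wait out every reader that could see oldp. */
	kfree(oldp);
}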
3646 
3647 /**
3648  * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3649  * @rgosp: Place to put state cookie
3650  *
3651  * Stores into @rgosp a value that will always be treated by functions
3652  * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3653  * has already completed.
3654  */
3655 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3656 {
3657 	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3658 	rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3659 }
3660 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3661 
3662 /**
3663  * get_state_synchronize_rcu - Snapshot current RCU state
3664  *
3665  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3666  * or poll_state_synchronize_rcu() to determine whether or not a full
3667  * grace period has elapsed in the meantime.
3668  */
3669 unsigned long get_state_synchronize_rcu(void)
3670 {
3671 	/*
3672 	 * Any prior manipulation of RCU-protected data must happen
3673 	 * before the load from ->gp_seq.
3674 	 */
3675 	smp_mb();  /* ^^^ */
3676 	return rcu_seq_snap(&rcu_state.gp_seq_polled);
3677 }
3678 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3679 
3680 /**
3681  * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3682  * @rgosp: location to place combined normal/expedited grace-period state
3683  *
3684  * Places the normal and expedited grace-period states in @rgosp.  This
3685  * state value can be passed to a later call to cond_synchronize_rcu_full()
3686  * or poll_state_synchronize_rcu_full() to determine whether or not a
3687  * grace period (whether normal or expedited) has elapsed in the meantime.
3688  * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3689  * long, but is guaranteed to see all grace periods.  In contrast, the
3690  * combined state occupies less memory, but can sometimes fail to take
3691  * grace periods into account.
3692  *
3693  * This does not guarantee that the needed grace period will actually
3694  * start.
3695  */
3696 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3697 {
3698 	struct rcu_node *rnp = rcu_get_root();
3699 
3700 	/*
3701 	 * Any prior manipulation of RCU-protected data must happen
3702 	 * before the loads from ->gp_seq and ->expedited_sequence.
3703 	 */
3704 	smp_mb();  /* ^^^ */
3705 	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3706 	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3707 }
3708 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3709 
3710 /*
3711  * Helper function for start_poll_synchronize_rcu() and
3712  * start_poll_synchronize_rcu_full().
3713  */
3714 static void start_poll_synchronize_rcu_common(void)
3715 {
3716 	unsigned long flags;
3717 	bool needwake;
3718 	struct rcu_data *rdp;
3719 	struct rcu_node *rnp;
3720 
3721 	lockdep_assert_irqs_enabled();
3722 	local_irq_save(flags);
3723 	rdp = this_cpu_ptr(&rcu_data);
3724 	rnp = rdp->mynode;
3725 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3726 	// Note it is possible for a grace period to have elapsed between
3727 	// the above call to get_state_synchronize_rcu() and the below call
3728 	// to rcu_seq_snap.  This is OK, the worst that happens is that we
3729 	// get a grace period that no one needed.  These accesses are ordered
3730 	// by smp_mb(), and we are accessing them in the opposite order
3731 	// from which they are updated at grace-period start, as required.
3732 	needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3733 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3734 	if (needwake)
3735 		rcu_gp_kthread_wake();
3736 }
3737 
3738 /**
3739  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3740  *
3741  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3742  * or poll_state_synchronize_rcu() to determine whether or not a full
3743  * grace period has elapsed in the meantime.  If the needed grace period
3744  * is not already slated to start, notifies RCU core of the need for that
3745  * grace period.
3746  *
3747  * Interrupts must be enabled for the case where it is necessary to awaken
3748  * the grace-period kthread.
3749  */
3750 unsigned long start_poll_synchronize_rcu(void)
3751 {
3752 	unsigned long gp_seq = get_state_synchronize_rcu();
3753 
3754 	start_poll_synchronize_rcu_common();
3755 	return gp_seq;
3756 }
3757 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
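/*
 * Usage sketch (illustrative only; the "retire_*" helpers are
 * hypothetical): a non-blocking updater records a cookie and makes sure
 * a grace period gets started, then checks the cookie later from a
 * context that must not sleep.
 */
static unsigned long retire_cookie;

static void retire_begin(void)
{
	/* Also kicks off a grace period if one is not already slated. */
	retire_cookie = start_poll_synchronize_rcu();
}

static bool retire_done(void)
{
	/* True once a full grace period has elapsed since retire_begin(). */
	return poll_state_synchronize_rcu(retire_cookie);
}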
3758 
3759 /**
3760  * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3761  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3762  *
3763  * Places the normal and expedited grace-period states in *@rgosp.  This
3764  * state value can be passed to a later call to cond_synchronize_rcu_full()
3765  * or poll_state_synchronize_rcu_full() to determine whether or not a
3766  * grace period (whether normal or expedited) has elapsed in the meantime.
3767  * If the needed grace period is not already slated to start, notifies
3768  * RCU core of the need for that grace period.
3769  *
3770  * Interrupts must be enabled for the case where it is necessary to awaken
3771  * the grace-period kthread.
3772  */
3773 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3774 {
3775 	get_state_synchronize_rcu_full(rgosp);
3776 
3777 	start_poll_synchronize_rcu_common();
3778 }
3779 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3780 
3781 /**
3782  * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3783  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3784  *
3785  * If a full RCU grace period has elapsed since the earlier call from
3786  * which @oldstate was obtained, return @true, otherwise return @false.
3787  * If @false is returned, it is the caller's responsibility to invoke this
3788  * function later on until it does return @true.  Alternatively, the caller
3789  * can explicitly wait for a grace period, for example, by passing @oldstate
3790  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3791  *
3792  * Yes, this function does not take counter wrap into account.
3793  * But counter wrap is harmless.  If the counter wraps, we have waited for
3794  * more than a billion grace periods (and way more on a 64-bit system!).
3795  * Those needing to keep old state values for very long time periods
3796  * (many hours even on 32-bit systems) should check them occasionally and
3797  * either refresh them or set a flag indicating that the grace period has
3798  * completed.  Alternatively, they can use get_completed_synchronize_rcu()
3799  * to get a guaranteed-completed grace-period state.
3800  *
3801  * This function provides the same memory-ordering guarantees that
3802  * would be provided by a synchronize_rcu() that was invoked at the call
3803  * to the function that provided @oldstate, and that returned at the end
3804  * of this function.
3805  */
3806 bool poll_state_synchronize_rcu(unsigned long oldstate)
3807 {
3808 	if (oldstate == RCU_GET_STATE_COMPLETED ||
3809 	    rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3810 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3811 		return true;
3812 	}
3813 	return false;
3814 }
3815 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
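/*
 * Usage sketch (illustrative only; "struct widget" is hypothetical):
 * snapshot the grace-period state right after hiding an object from new
 * readers, then reclaim it later, blocking only if no grace period has
 * elapsed in the meantime.
 */
struct widget {
	struct list_head node;
	unsigned long gp_cookie;
};

static void widget_retire(struct widget *wp)
{
	list_del_rcu(&wp->node);			/* Hide from new readers. */
	wp->gp_cookie = get_state_synchronize_rcu();	/* Snapshot GP state. */
}

static void widget_reclaim(struct widget *wp)
{
	if (!poll_state_synchronize_rcu(wp->gp_cookie))
		synchronize_rcu();	/* No full GP yet, so wait one out. */
	kfree(wp);
}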
3816 
3817 /**
3818  * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3819  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3820  *
3821  * If a full RCU grace period has elapsed since the earlier call from
3822  * which *rgosp was obtained, return @true, otherwise return @false.
3823  * If @false is returned, it is the caller's responsibility to invoke this
3824  * function later on until it does return @true.  Alternatively, the caller
3825  * can explicitly wait for a grace period, for example, by passing @rgosp
3826  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3827  *
3828  * Yes, this function does not take counter wrap into account.
3829  * But counter wrap is harmless.  If the counter wraps, we have waited
3830  * for more than a billion grace periods (and way more on a 64-bit
3831  * system!).  Those needing to keep rcu_gp_oldstate values for very
3832  * long time periods (many hours even on 32-bit systems) should check
3833  * them occasionally and either refresh them or set a flag indicating
3834  * that the grace period has completed.  Alternatively, they can use
3835  * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3836  * grace-period state.
3837  *
3838  * This function provides the same memory-ordering guarantees that would
3839  * be provided by a synchronize_rcu() that was invoked at the call to
3840  * the function that provided @rgosp, and that returned at the end of this
3841  * function.  And this guarantee requires that the root rcu_node structure's
3842  * ->gp_seq field be checked instead of that of the rcu_state structure.
3843  * The problem is that the just-ending grace-period's callbacks can be
3844  * invoked between the time that the root rcu_node structure's ->gp_seq
3845  * field is updated and the time that the rcu_state structure's ->gp_seq
3846  * field is updated.  Therefore, if a single synchronize_rcu() is to
3847  * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3848  * then the root rcu_node structure is the one that needs to be polled.
3849  */
3850 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3851 {
3852 	struct rcu_node *rnp = rcu_get_root();
3853 
3854 	smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3855 	if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3856 	    rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3857 	    rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3858 	    rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3859 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3860 		return true;
3861 	}
3862 	return false;
3863 }
3864 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
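/*
 * Usage sketch (illustrative only; "struct big_widget" is hypothetical):
 * the "_full" variants store both normal and expedited grace-period state
 * in a struct rcu_gp_oldstate, trading a larger cookie for never missing
 * a grace period.
 */
struct big_widget {
	struct rcu_gp_oldstate gp_full;
	/* ... payload ... */
};

static void big_widget_retire(struct big_widget *bwp)
{
	get_state_synchronize_rcu_full(&bwp->gp_full);	/* Full snapshot. */
}

static void big_widget_reclaim(struct big_widget *bwp)
{
	if (!poll_state_synchronize_rcu_full(&bwp->gp_full))
		synchronize_rcu();	/* Not yet elapsed; wait it out. */
	kfree(bwp);
}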
3865 
3866 /**
3867  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3868  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3869  *
3870  * If a full RCU grace period has elapsed since the earlier call to
3871  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3872  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3873  *
3874  * Yes, this function does not take counter wrap into account.
3875  * But counter wrap is harmless.  If the counter wraps, we have waited for
3876  * more than 2 billion grace periods (and way more on a 64-bit system!),
3877  * so waiting for a couple of additional grace periods should be just fine.
3878  *
3879  * This function provides the same memory-ordering guarantees that
3880  * would be provided by a synchronize_rcu() that was invoked at the call
3881  * to the function that provided @oldstate and that returned at the end
3882  * of this function.
3883  */
3884 void cond_synchronize_rcu(unsigned long oldstate)
3885 {
3886 	if (!poll_state_synchronize_rcu(oldstate))
3887 		synchronize_rcu();
3888 }
3889 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
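/*
 * Usage sketch (illustrative only; reuses the hypothetical struct widget
 * from the sketch above): cond_synchronize_rcu() folds the poll-then-
 * maybe-wait pattern into a single call.
 */
static void widget_reclaim_cond(struct widget *wp)
{
	cond_synchronize_rcu(wp->gp_cookie);	/* Waits only if needed. */
	kfree(wp);
}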
3890 
3891 /**
3892  * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3893  * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3894  *
3895  * If a full RCU grace period has elapsed since the call to
3896  * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3897  * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3898  * obtained, just return.  Otherwise, invoke synchronize_rcu() to wait
3899  * for a full grace period.
3900  *
3901  * Yes, this function does not take counter wrap into account.
3902  * But counter wrap is harmless.  If the counter wraps, we have waited for
3903  * more than 2 billion grace periods (and way more on a 64-bit system!),
3904  * so waiting for a couple of additional grace periods should be just fine.
3905  *
3906  * This function provides the same memory-ordering guarantees that
3907  * would be provided by a synchronize_rcu() that was invoked at the call
3908  * to the function that provided @rgosp and that returned at the end of
3909  * this function.
3910  */
3911 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3912 {
3913 	if (!poll_state_synchronize_rcu_full(rgosp))
3914 		synchronize_rcu();
3915 }
3916 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3917 
3918 /*
3919  * Check to see if there is any immediate RCU-related work to be done by
3920  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3921  * in order of increasing expense: checks that can be carried out against
3922  * CPU-local state are performed first.  However, we must check for CPU
3923  * stalls first, else we might not get a chance.
3924  */
3925 static int rcu_pending(int user)
3926 {
3927 	bool gp_in_progress;
3928 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3929 	struct rcu_node *rnp = rdp->mynode;
3930 
3931 	lockdep_assert_irqs_disabled();
3932 
3933 	/* Check for CPU stalls, if enabled. */
3934 	check_cpu_stall(rdp);
3935 
3936 	/* Does this CPU need a deferred NOCB wakeup? */
3937 	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3938 		return 1;
3939 
3940 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3941 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3942 		return 0;
3943 
3944 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3945 	gp_in_progress = rcu_gp_in_progress();
3946 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3947 		return 1;
3948 
3949 	/* Does this CPU have callbacks ready to invoke? */
3950 	if (!rcu_rdp_is_offloaded(rdp) &&
3951 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3952 		return 1;
3953 
3954 	/* Has RCU gone idle with this CPU needing another grace period? */
3955 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3956 	    !rcu_rdp_is_offloaded(rdp) &&
3957 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3958 		return 1;
3959 
3960 	/* Have RCU grace period completed or started?  */
3961 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3962 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3963 		return 1;
3964 
3965 	/* nothing to do */
3966 	return 0;
3967 }
3968 
3969 /*
3970  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3971  * the compiler is expected to optimize this away.
3972  */
3973 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3974 {
3975 	trace_rcu_barrier(rcu_state.name, s, cpu,
3976 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3977 }
3978 
3979 /*
3980  * RCU callback function for rcu_barrier().  If we are last, wake
3981  * up the task executing rcu_barrier().
3982  *
3983  * Note that the value of rcu_state.barrier_sequence must be captured
3984  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3985  * other CPUs might count the value down to zero before this CPU gets
3986  * around to invoking rcu_barrier_trace(), which might result in bogus
3987  * data from the next instance of rcu_barrier().
3988  */
3989 static void rcu_barrier_callback(struct rcu_head *rhp)
3990 {
3991 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3992 
3993 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3994 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3995 		complete(&rcu_state.barrier_completion);
3996 	} else {
3997 		rcu_barrier_trace(TPS("CB"), -1, s);
3998 	}
3999 }
4000 
4001 /*
4002  * If needed, entrain an rcu_barrier() callback on rdp->cblist.
4003  */
4004 static void rcu_barrier_entrain(struct rcu_data *rdp)
4005 {
4006 	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
4007 	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
4008 	bool wake_nocb = false;
4009 	bool was_alldone = false;
4010 
4011 	lockdep_assert_held(&rcu_state.barrier_lock);
4012 	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
4013 		return;
4014 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
4015 	rdp->barrier_head.func = rcu_barrier_callback;
4016 	debug_rcu_head_queue(&rdp->barrier_head);
4017 	rcu_nocb_lock(rdp);
4018 	/*
4019 	 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
4020 	 * queue. This way we don't wait for bypass timer that can reach seconds
4021 	 * if it's fully lazy.
4022 	 */
4023 	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
4024 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
4025 	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
4026 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
4027 		atomic_inc(&rcu_state.barrier_cpu_count);
4028 	} else {
4029 		debug_rcu_head_unqueue(&rdp->barrier_head);
4030 		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
4031 	}
4032 	rcu_nocb_unlock(rdp);
4033 	if (wake_nocb)
4034 		wake_nocb_gp(rdp, false);
4035 	smp_store_release(&rdp->barrier_seq_snap, gseq);
4036 }
4037 
4038 /*
4039  * Called with preemption disabled, and from cross-cpu IRQ context.
4040  */
4041 static void rcu_barrier_handler(void *cpu_in)
4042 {
4043 	uintptr_t cpu = (uintptr_t)cpu_in;
4044 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4045 
4046 	lockdep_assert_irqs_disabled();
4047 	WARN_ON_ONCE(cpu != rdp->cpu);
4048 	WARN_ON_ONCE(cpu != smp_processor_id());
4049 	raw_spin_lock(&rcu_state.barrier_lock);
4050 	rcu_barrier_entrain(rdp);
4051 	raw_spin_unlock(&rcu_state.barrier_lock);
4052 }
4053 
4054 /**
4055  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4056  *
4057  * Note that this primitive does not necessarily wait for an RCU grace period
4058  * to complete.  For example, if there are no RCU callbacks queued anywhere
4059  * in the system, then rcu_barrier() is within its rights to return
4060  * immediately, without waiting for anything, much less an RCU grace period.
4061  */
4062 void rcu_barrier(void)
4063 {
4064 	uintptr_t cpu;
4065 	unsigned long flags;
4066 	unsigned long gseq;
4067 	struct rcu_data *rdp;
4068 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4069 
4070 	rcu_barrier_trace(TPS("Begin"), -1, s);
4071 
4072 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
4073 	mutex_lock(&rcu_state.barrier_mutex);
4074 
4075 	/* Did someone else do our work for us? */
4076 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4077 		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4078 		smp_mb(); /* caller's subsequent code after above check. */
4079 		mutex_unlock(&rcu_state.barrier_mutex);
4080 		return;
4081 	}
4082 
4083 	/* Mark the start of the barrier operation. */
4084 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4085 	rcu_seq_start(&rcu_state.barrier_sequence);
4086 	gseq = rcu_state.barrier_sequence;
4087 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4088 
4089 	/*
4090 	 * Initialize the count to two rather than to zero in order
4091 	 * to avoid a too-soon return to zero in case of an immediate
4092 	 * invocation of the just-enqueued callback (or preemption of
4093 	 * this task).  Exclude CPU-hotplug operations to ensure that no
4094 	 * offline non-offloaded CPU has callbacks queued.
4095 	 */
4096 	init_completion(&rcu_state.barrier_completion);
4097 	atomic_set(&rcu_state.barrier_cpu_count, 2);
4098 	raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4099 
4100 	/*
4101 	 * Force each CPU with callbacks to register a new callback.
4102 	 * When that callback is invoked, we will know that all of the
4103 	 * corresponding CPU's preceding callbacks have been invoked.
4104 	 */
4105 	for_each_possible_cpu(cpu) {
4106 		rdp = per_cpu_ptr(&rcu_data, cpu);
4107 retry:
4108 		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4109 			continue;
4110 		raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4111 		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4112 			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4113 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4114 			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4115 			continue;
4116 		}
4117 		if (!rcu_rdp_cpu_online(rdp)) {
4118 			rcu_barrier_entrain(rdp);
4119 			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4120 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4121 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4122 			continue;
4123 		}
4124 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4125 		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4126 			schedule_timeout_uninterruptible(1);
4127 			goto retry;
4128 		}
4129 		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4130 		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4131 	}
4132 
4133 	/*
4134 	 * Now that we have an rcu_barrier_callback() callback on each
4135 	 * CPU, and thus each counted, remove the initial count.
4136 	 */
4137 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4138 		complete(&rcu_state.barrier_completion);
4139 
4140 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4141 	wait_for_completion(&rcu_state.barrier_completion);
4142 
4143 	/* Mark the end of the barrier operation. */
4144 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4145 	rcu_seq_end(&rcu_state.barrier_sequence);
4146 	gseq = rcu_state.barrier_sequence;
4147 	for_each_possible_cpu(cpu) {
4148 		rdp = per_cpu_ptr(&rcu_data, cpu);
4149 
4150 		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4151 	}
4152 
4153 	/* Other rcu_barrier() invocations can now safely proceed. */
4154 	mutex_unlock(&rcu_state.barrier_mutex);
4155 }
4156 EXPORT_SYMBOL_GPL(rcu_barrier);
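/*
 * Usage sketch (illustrative only; "example_cache" and example_exit() are
 * hypothetical): a module that posts call_rcu() callbacks must invoke
 * rcu_barrier() on unload so that no callback can run after the module's
 * code and data (here, a kmem_cache) have been torn down.
 */
static struct kmem_cache *example_cache;

static void __exit example_exit(void)
{
	/* Stop posting new call_rcu() callbacks first (not shown). */
	rcu_barrier();			/* Wait for all in-flight callbacks. */
	kmem_cache_destroy(example_cache);
}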
4157 
4158 /*
4159  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4160  * first CPU in a given leaf rcu_node structure coming online.  The caller
4161  * must hold the corresponding leaf rcu_node ->lock with interrupts
4162  * disabled.
4163  */
4164 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4165 {
4166 	long mask;
4167 	long oldmask;
4168 	struct rcu_node *rnp = rnp_leaf;
4169 
4170 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4171 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4172 	for (;;) {
4173 		mask = rnp->grpmask;
4174 		rnp = rnp->parent;
4175 		if (rnp == NULL)
4176 			return;
4177 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4178 		oldmask = rnp->qsmaskinit;
4179 		rnp->qsmaskinit |= mask;
4180 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4181 		if (oldmask)
4182 			return;
4183 	}
4184 }
4185 
4186 /*
4187  * Do boot-time initialization of a CPU's per-CPU RCU data.
4188  */
4189 static void __init
4190 rcu_boot_init_percpu_data(int cpu)
4191 {
4192 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4193 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4194 
4195 	/* Set up local state, ensuring consistent view of global state. */
4196 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4197 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4198 	WARN_ON_ONCE(ct->dynticks_nesting != 1);
4199 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4200 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4201 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4202 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4203 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4204 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4205 	rdp->last_sched_clock = jiffies;
4206 	rdp->cpu = cpu;
4207 	rcu_boot_init_nocb_percpu_data(rdp);
4208 }
4209 
4210 /*
4211  * Invoked early in the CPU-online process, when pretty much all services
4212  * are available.  The incoming CPU is not present.
4213  *
4214  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4215  * offline event can be happening at a given time.  Note also that we can
4216  * accept some slop in the rsp->gp_seq access due to the fact that this
4217  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4218  * And any offloaded callbacks are being numbered elsewhere.
4219  */
4220 int rcutree_prepare_cpu(unsigned int cpu)
4221 {
4222 	unsigned long flags;
4223 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4224 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4225 	struct rcu_node *rnp = rcu_get_root();
4226 
4227 	/* Set up local state, ensuring consistent view of global state. */
4228 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4229 	rdp->qlen_last_fqs_check = 0;
4230 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4231 	rdp->blimit = blimit;
4232 	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4233 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4234 
4235 	/*
4236 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4237 	 * (re-)initialized.
4238 	 */
4239 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4240 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4241 
4242 	/*
4243 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4244 	 * propagation up the rcu_node tree will happen at the beginning
4245 	 * of the next grace period.
4246 	 */
4247 	rnp = rdp->mynode;
4248 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4249 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4250 	rdp->gp_seq_needed = rdp->gp_seq;
4251 	rdp->cpu_no_qs.b.norm = true;
4252 	rdp->core_needs_qs = false;
4253 	rdp->rcu_iw_pending = false;
4254 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4255 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4256 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4257 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4258 	rcu_spawn_one_boost_kthread(rnp);
4259 	rcu_spawn_cpu_nocb_kthread(cpu);
4260 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4261 
4262 	return 0;
4263 }
4264 
4265 /*
4266  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4267  */
4268 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4269 {
4270 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4271 
4272 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4273 }
4274 
4275 /*
4276  * Has the specified (known valid) CPU ever been fully online?
4277  */
4278 bool rcu_cpu_beenfullyonline(int cpu)
4279 {
4280 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4281 
4282 	return smp_load_acquire(&rdp->beenonline);
4283 }
4284 
4285 /*
4286  * Near the end of the CPU-online process.  Pretty much all services
4287  * enabled, and the CPU is now very much alive.
4288  */
4289 int rcutree_online_cpu(unsigned int cpu)
4290 {
4291 	unsigned long flags;
4292 	struct rcu_data *rdp;
4293 	struct rcu_node *rnp;
4294 
4295 	rdp = per_cpu_ptr(&rcu_data, cpu);
4296 	rnp = rdp->mynode;
4297 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4298 	rnp->ffmask |= rdp->grpmask;
4299 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4300 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4301 		return 0; /* Too early in boot for scheduler work. */
4302 	sync_sched_exp_online_cleanup(cpu);
4303 	rcutree_affinity_setting(cpu, -1);
4304 
4305 	// Stop-machine done, so allow nohz_full to disable tick.
4306 	tick_dep_clear(TICK_DEP_BIT_RCU);
4307 	return 0;
4308 }
4309 
4310 /*
4311  * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4312  * with pretty much all services enabled.
4313  */
4314 int rcutree_offline_cpu(unsigned int cpu)
4315 {
4316 	unsigned long flags;
4317 	struct rcu_data *rdp;
4318 	struct rcu_node *rnp;
4319 
4320 	rdp = per_cpu_ptr(&rcu_data, cpu);
4321 	rnp = rdp->mynode;
4322 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4323 	rnp->ffmask &= ~rdp->grpmask;
4324 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4325 
4326 	rcutree_affinity_setting(cpu, cpu);
4327 
4328 	// nohz_full CPUs need the tick for stop-machine to work quickly
4329 	tick_dep_set(TICK_DEP_BIT_RCU);
4330 	return 0;
4331 }
4332 
4333 /*
4334  * Mark the specified CPU as being online so that subsequent grace periods
4335  * (both expedited and normal) will wait on it.  Note that this means that
4336  * incoming CPUs are not allowed to use RCU read-side critical sections
4337  * until this function is called.  Failing to observe this restriction
4338  * will result in lockdep splats.
4339  *
4340  * Note that this function is special in that it is invoked directly
4341  * from the incoming CPU rather than from the cpuhp_step mechanism.
4342  * This is because this function must be invoked at a precise location.
4343  * This incoming CPU must not have enabled interrupts yet.
4344  */
4345 void rcu_cpu_starting(unsigned int cpu)
4346 {
4347 	unsigned long mask;
4348 	struct rcu_data *rdp;
4349 	struct rcu_node *rnp;
4350 	bool newcpu;
4351 
4352 	lockdep_assert_irqs_disabled();
4353 	rdp = per_cpu_ptr(&rcu_data, cpu);
4354 	if (rdp->cpu_started)
4355 		return;
4356 	rdp->cpu_started = true;
4357 
4358 	rnp = rdp->mynode;
4359 	mask = rdp->grpmask;
4360 	arch_spin_lock(&rcu_state.ofl_lock);
4361 	rcu_dynticks_eqs_online();
4362 	raw_spin_lock(&rcu_state.barrier_lock);
4363 	raw_spin_lock_rcu_node(rnp);
4364 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4365 	raw_spin_unlock(&rcu_state.barrier_lock);
4366 	newcpu = !(rnp->expmaskinitnext & mask);
4367 	rnp->expmaskinitnext |= mask;
4368 	/* Allow lockless access for expedited grace periods. */
4369 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4370 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4371 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4372 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4373 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4374 
4375 	/* An incoming CPU should never be blocking a grace period. */
4376 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4377 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
4378 		unsigned long flags;
4379 
4380 		local_irq_save(flags);
4381 		rcu_disable_urgency_upon_qs(rdp);
4382 		/* Report QS -after- changing ->qsmaskinitnext! */
4383 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4384 	} else {
4385 		raw_spin_unlock_rcu_node(rnp);
4386 	}
4387 	arch_spin_unlock(&rcu_state.ofl_lock);
4388 	smp_store_release(&rdp->beenonline, true);
4389 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4390 }
4391 
4392 /*
4393  * The outgoing CPU has no further need of RCU, so remove it from
4394  * the rcu_node tree's ->qsmaskinitnext bit masks.
4395  *
4396  * Note that this function is special in that it is invoked directly
4397  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4398  * This is because this function must be invoked at a precise location.
4399  */
4400 void rcu_report_dead(unsigned int cpu)
4401 {
4402 	unsigned long flags, seq_flags;
4403 	unsigned long mask;
4404 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4405 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4406 
4407 	// Do any dangling deferred wakeups.
4408 	do_nocb_deferred_wakeup(rdp);
4409 
4410 	/* QS for any half-done expedited grace period. */
4411 	rcu_report_exp_rdp(rdp);
4412 	rcu_preempt_deferred_qs(current);
4413 
4414 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4415 	mask = rdp->grpmask;
4416 	local_irq_save(seq_flags);
4417 	arch_spin_lock(&rcu_state.ofl_lock);
4418 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4419 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4420 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4421 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4422 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4423 		rcu_disable_urgency_upon_qs(rdp);
4424 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4425 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4426 	}
4427 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4428 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4429 	arch_spin_unlock(&rcu_state.ofl_lock);
4430 	local_irq_restore(seq_flags);
4431 
4432 	rdp->cpu_started = false;
4433 }
4434 
4435 #ifdef CONFIG_HOTPLUG_CPU
4436 /*
4437  * The outgoing CPU has just passed through the dying-idle state, and we
4438  * are being invoked from the CPU that was IPIed to continue the offline
4439  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4440  */
4441 void rcutree_migrate_callbacks(int cpu)
4442 {
4443 	unsigned long flags;
4444 	struct rcu_data *my_rdp;
4445 	struct rcu_node *my_rnp;
4446 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4447 	bool needwake;
4448 
4449 	if (rcu_rdp_is_offloaded(rdp) ||
4450 	    rcu_segcblist_empty(&rdp->cblist))
4451 		return;  /* No callbacks to migrate. */
4452 
4453 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4454 	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4455 	rcu_barrier_entrain(rdp);
4456 	my_rdp = this_cpu_ptr(&rcu_data);
4457 	my_rnp = my_rdp->mynode;
4458 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4459 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4460 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4461 	/* Leverage recent GPs and set GP for new callbacks. */
4462 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4463 		   rcu_advance_cbs(my_rnp, my_rdp);
4464 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4465 	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4466 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4467 	rcu_segcblist_disable(&rdp->cblist);
4468 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4469 	check_cb_ovld_locked(my_rdp, my_rnp);
4470 	if (rcu_rdp_is_offloaded(my_rdp)) {
4471 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4472 		__call_rcu_nocb_wake(my_rdp, true, flags);
4473 	} else {
4474 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4475 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4476 	}
4477 	if (needwake)
4478 		rcu_gp_kthread_wake();
4479 	lockdep_assert_irqs_enabled();
4480 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4481 		  !rcu_segcblist_empty(&rdp->cblist),
4482 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4483 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4484 		  rcu_segcblist_first_cb(&rdp->cblist));
4485 }
4486 #endif
4487 
4488 /*
4489  * On non-huge systems, use expedited RCU grace periods to make suspend
4490  * and hibernation run faster.
4491  */
4492 static int rcu_pm_notify(struct notifier_block *self,
4493 			 unsigned long action, void *hcpu)
4494 {
4495 	switch (action) {
4496 	case PM_HIBERNATION_PREPARE:
4497 	case PM_SUSPEND_PREPARE:
4498 		rcu_async_hurry();
4499 		rcu_expedite_gp();
4500 		break;
4501 	case PM_POST_HIBERNATION:
4502 	case PM_POST_SUSPEND:
4503 		rcu_unexpedite_gp();
4504 		rcu_async_relax();
4505 		break;
4506 	default:
4507 		break;
4508 	}
4509 	return NOTIFY_OK;
4510 }
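/*
 * Note that this notifier is hooked up later in this file, from rcu_init(),
 * via pm_notifier(rcu_pm_notify, 0), so suspend and hibernation transitions
 * switch RCU into and back out of expedited mode automatically.
 */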
4511 
4512 #ifdef CONFIG_RCU_EXP_KTHREAD
4513 struct kthread_worker *rcu_exp_gp_kworker;
4514 struct kthread_worker *rcu_exp_par_gp_kworker;
4515 
4516 static void __init rcu_start_exp_gp_kworkers(void)
4517 {
4518 	const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4519 	const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4520 	struct sched_param param = { .sched_priority = kthread_prio };
4521 
4522 	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4523 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4524 		pr_err("Failed to create %s!\n", gp_kworker_name);
4525 		return;
4526 	}
4527 
4528 	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4529 	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4530 		pr_err("Failed to create %s!\n", par_gp_kworker_name);
4531 		kthread_destroy_worker(rcu_exp_gp_kworker);
4532 		return;
4533 	}
4534 
4535 	sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4536 	sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4537 				   &param);
4538 }
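/*
 * Illustrative sketch (not part of the kernel build): expedited grace-period
 * work is later handed to these workers through the kthread_worker API,
 * roughly along the lines of the hypothetical fragment below (the work item
 * and handler names are examples only):
 *
 *	struct kthread_work my_exp_work;
 *
 *	kthread_init_work(&my_exp_work, my_exp_handler);
 *	kthread_queue_work(rcu_exp_gp_kworker, &my_exp_work);
 */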
4539 
4540 static inline void rcu_alloc_par_gp_wq(void)
4541 {
4542 }
4543 #else /* !CONFIG_RCU_EXP_KTHREAD */
4544 struct workqueue_struct *rcu_par_gp_wq;
4545 
4546 static void __init rcu_start_exp_gp_kworkers(void)
4547 {
4548 }
4549 
4550 static inline void rcu_alloc_par_gp_wq(void)
4551 {
4552 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4553 	WARN_ON(!rcu_par_gp_wq);
4554 }
4555 #endif /* CONFIG_RCU_EXP_KTHREAD */
4556 
4557 /*
4558  * Spawn the kthreads that handle RCU's grace periods.
4559  */
4560 static int __init rcu_spawn_gp_kthread(void)
4561 {
4562 	unsigned long flags;
4563 	struct rcu_node *rnp;
4564 	struct sched_param sp;
4565 	struct task_struct *t;
4566 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4567 
4568 	rcu_scheduler_fully_active = 1;
4569 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4570 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4571 		return 0;
4572 	if (kthread_prio) {
4573 		sp.sched_priority = kthread_prio;
4574 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4575 	}
4576 	rnp = rcu_get_root();
4577 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4578 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4579 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4580 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4581 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4582 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4583 	wake_up_process(t);
4584 	/* This is a pre-SMP initcall, we expect a single CPU */
4585 	WARN_ON(num_online_cpus() > 1);
4586 	/*
4587 	 * These kthreads could not be created earlier, in rcu_init() -> rcutree_prepare_cpu(),
4588 	 * because rcu_scheduler_fully_active was not yet set.
4589 	 */
4590 	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4591 	rcu_spawn_one_boost_kthread(rdp->mynode);
4592 	rcu_spawn_core_kthreads();
4593 	/* Create kthread worker for expedited GPs */
4594 	rcu_start_exp_gp_kworkers();
4595 	return 0;
4596 }
4597 early_initcall(rcu_spawn_gp_kthread);
4598 
4599 /*
4600  * This function is invoked towards the end of the scheduler's
4601  * initialization process.  Before this is called, the idle task might
4602  * contain synchronous grace-period primitives (during which time, this idle
4603  * task is booting the system, and such primitives are no-ops).  After this
4604  * function is called, any synchronous grace-period primitives are run as
4605  * expedited, with the requesting task driving the grace period forward.
4606  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4607  * runtime RCU functionality.
4608  */
4609 void rcu_scheduler_starting(void)
4610 {
4611 	unsigned long flags;
4612 	struct rcu_node *rnp;
4613 
4614 	WARN_ON(num_online_cpus() != 1);
4615 	WARN_ON(nr_context_switches() > 0);
4616 	rcu_test_sync_prims();
4617 
4618 	// Fix up the ->gp_seq counters.
4619 	local_irq_save(flags);
4620 	rcu_for_each_node_breadth_first(rnp)
4621 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4622 	local_irq_restore(flags);
4623 
4624 	// Switch out of early boot mode.
4625 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4626 	rcu_test_sync_prims();
4627 }
4628 
4629 /*
4630  * Helper function for rcu_init() that initializes the rcu_state structure.
4631  */
4632 static void __init rcu_init_one(void)
4633 {
4634 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4635 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4636 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4637 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4638 
4639 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4640 	int cpustride = 1;
4641 	int i;
4642 	int j;
4643 	struct rcu_node *rnp;
4644 
4645 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4646 
4647 	/* Silence gcc 4.8 false positive about array index out of range. */
4648 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4649 		panic("rcu_init_one: rcu_num_lvls out of range");
4650 
4651 	/* Initialize the level-tracking arrays. */
4652 
4653 	for (i = 1; i < rcu_num_lvls; i++)
4654 		rcu_state.level[i] =
4655 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4656 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4657 
4658 	/* Initialize the elements themselves, starting from the leaves. */
4659 
4660 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4661 		cpustride *= levelspread[i];
4662 		rnp = rcu_state.level[i];
4663 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4664 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4665 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4666 						   &rcu_node_class[i], buf[i]);
4667 			raw_spin_lock_init(&rnp->fqslock);
4668 			lockdep_set_class_and_name(&rnp->fqslock,
4669 						   &rcu_fqs_class[i], fqs[i]);
4670 			rnp->gp_seq = rcu_state.gp_seq;
4671 			rnp->gp_seq_needed = rcu_state.gp_seq;
4672 			rnp->completedqs = rcu_state.gp_seq;
4673 			rnp->qsmask = 0;
4674 			rnp->qsmaskinit = 0;
4675 			rnp->grplo = j * cpustride;
4676 			rnp->grphi = (j + 1) * cpustride - 1;
4677 			if (rnp->grphi >= nr_cpu_ids)
4678 				rnp->grphi = nr_cpu_ids - 1;
4679 			if (i == 0) {
4680 				rnp->grpnum = 0;
4681 				rnp->grpmask = 0;
4682 				rnp->parent = NULL;
4683 			} else {
4684 				rnp->grpnum = j % levelspread[i - 1];
4685 				rnp->grpmask = BIT(rnp->grpnum);
4686 				rnp->parent = rcu_state.level[i - 1] +
4687 					      j / levelspread[i - 1];
4688 			}
4689 			rnp->level = i;
4690 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4691 			rcu_init_one_nocb(rnp);
4692 			init_waitqueue_head(&rnp->exp_wq[0]);
4693 			init_waitqueue_head(&rnp->exp_wq[1]);
4694 			init_waitqueue_head(&rnp->exp_wq[2]);
4695 			init_waitqueue_head(&rnp->exp_wq[3]);
4696 			spin_lock_init(&rnp->exp_lock);
4697 			mutex_init(&rnp->boost_kthread_mutex);
4698 			raw_spin_lock_init(&rnp->exp_poll_lock);
4699 			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4700 			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4701 		}
4702 	}
4703 
4704 	init_swait_queue_head(&rcu_state.gp_wq);
4705 	init_swait_queue_head(&rcu_state.expedited_wq);
4706 	rnp = rcu_first_leaf_node();
4707 	for_each_possible_cpu(i) {
4708 		while (i > rnp->grphi)
4709 			rnp++;
4710 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4711 		rcu_boot_init_percpu_data(i);
4712 	}
4713 }
4714 
4715 /*
4716  * Force priority from the kernel command-line into range.
4717  */
4718 static void __init sanitize_kthread_prio(void)
4719 {
4720 	int kthread_prio_in = kthread_prio;
4721 
4722 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4723 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4724 		kthread_prio = 2;
4725 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4726 		kthread_prio = 1;
4727 	else if (kthread_prio < 0)
4728 		kthread_prio = 0;
4729 	else if (kthread_prio > 99)
4730 		kthread_prio = 99;
4731 
4732 	if (kthread_prio != kthread_prio_in)
4733 		pr_alert("%s: Limited prio to %d from %d\n",
4734 			 __func__, kthread_prio, kthread_prio_in);
4735 }
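/*
 * Worked example (illustrative only): booting a CONFIG_RCU_BOOST=y kernel
 * with rcutree.kthread_prio=150 falls through to the final clamp above, so
 * kthread_prio is limited to 99 and a "Limited prio to 99 from 150" message
 * is printed.  Similarly, rcutree.kthread_prio=0 on such a kernel is raised
 * to at least 1.
 */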
4736 
4737 /*
4738  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4739  * replace the definitions in tree.h because those are needed to size
4740  * the ->node array in the rcu_state structure.
4741  */
4742 void rcu_init_geometry(void)
4743 {
4744 	ulong d;
4745 	int i;
4746 	static unsigned long old_nr_cpu_ids;
4747 	int rcu_capacity[RCU_NUM_LVLS];
4748 	static bool initialized;
4749 
4750 	if (initialized) {
4751 		/*
4752 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4753 		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4754 		 */
4755 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4756 		return;
4757 	}
4758 
4759 	old_nr_cpu_ids = nr_cpu_ids;
4760 	initialized = true;
4761 
4762 	/*
4763 	 * Initialize any unspecified boot parameters.
4764 	 * The default values of jiffies_till_first_fqs and
4765 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4766 	 * value (which is a function of HZ), plus one for each
4767 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4768 	 */
4769 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4770 	if (jiffies_till_first_fqs == ULONG_MAX)
4771 		jiffies_till_first_fqs = d;
4772 	if (jiffies_till_next_fqs == ULONG_MAX)
4773 		jiffies_till_next_fqs = d;
4774 	adjust_jiffies_till_sched_qs();
4775 
4776 	/* If the compile-time values are accurate, just leave. */
4777 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4778 	    nr_cpu_ids == NR_CPUS)
4779 		return;
4780 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4781 		rcu_fanout_leaf, nr_cpu_ids);
4782 
4783 	/*
4784 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4785 	 * and cannot exceed the number of bits in the rcu_node masks.
4786 	 * Complain and fall back to the compile-time values if this
4787 	 * limit is exceeded.
4788 	 */
4789 	if (rcu_fanout_leaf < 2 ||
4790 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4791 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4792 		WARN_ON(1);
4793 		return;
4794 	}
4795 
4796 	/*
4797 	 * Compute the number of CPUs that can be handled by an rcu_node tree
4798 	 * with the given number of levels.
4799 	 */
4800 	rcu_capacity[0] = rcu_fanout_leaf;
4801 	for (i = 1; i < RCU_NUM_LVLS; i++)
4802 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4803 
4804 	/*
4805 	 * The tree must be able to accommodate the configured number of CPUs.
4806 	 * If this limit is exceeded, fall back to the compile-time values.
4807 	 */
4808 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4809 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4810 		WARN_ON(1);
4811 		return;
4812 	}
4813 
4814 	/* Calculate the number of levels in the tree. */
4815 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4816 	}
4817 	rcu_num_lvls = i + 1;
4818 
4819 	/* Calculate the number of rcu_nodes at each level of the tree. */
4820 	for (i = 0; i < rcu_num_lvls; i++) {
4821 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4822 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4823 	}
4824 
4825 	/* Calculate the total number of rcu_node structures. */
4826 	rcu_num_nodes = 0;
4827 	for (i = 0; i < rcu_num_lvls; i++)
4828 		rcu_num_nodes += num_rcu_lvl[i];
4829 }
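/*
 * Worked example (illustrative only): with the common 64-bit defaults of
 * RCU_FANOUT_LEAF == 16 and RCU_FANOUT == 64, a system with nr_cpu_ids == 80
 * gets rcu_capacity[] == { 16, 1024, ... }, so two levels suffice
 * (rcu_num_lvls == 2).  The root level then needs DIV_ROUND_UP(80, 1024) == 1
 * rcu_node structure and the leaf level DIV_ROUND_UP(80, 16) == 5, for a
 * total of rcu_num_nodes == 6.
 */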
4830 
4831 /*
4832  * Dump out the structure of the rcu_node combining tree associated
4833  * with the rcu_state structure.
4834  */
4835 static void __init rcu_dump_rcu_node_tree(void)
4836 {
4837 	int level = 0;
4838 	struct rcu_node *rnp;
4839 
4840 	pr_info("rcu_node tree layout dump\n");
4841 	pr_info(" ");
4842 	rcu_for_each_node_breadth_first(rnp) {
4843 		if (rnp->level != level) {
4844 			pr_cont("\n");
4845 			pr_info(" ");
4846 			level = rnp->level;
4847 		}
4848 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4849 	}
4850 	pr_cont("\n");
4851 }
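/*
 * For the hypothetical two-level, 80-CPU geometry discussed above, the
 * resulting dump would look roughly like this (one line per level, root
 * first):
 *
 *	rcu_node tree layout dump
 *	 0:79 ^0
 *	 0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4
 */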
4852 
4853 struct workqueue_struct *rcu_gp_wq;
4854 
4855 static void __init kfree_rcu_batch_init(void)
4856 {
4857 	int cpu;
4858 	int i;
4859 
4860 	/* Clamp it to [0:100] seconds interval. */
4861 	if (rcu_delay_page_cache_fill_msec < 0 ||
4862 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4863 
4864 		rcu_delay_page_cache_fill_msec =
4865 			clamp(rcu_delay_page_cache_fill_msec, 0,
4866 				(int) (100 * MSEC_PER_SEC));
4867 
4868 		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4869 			rcu_delay_page_cache_fill_msec);
4870 	}
4871 
4872 	for_each_possible_cpu(cpu) {
4873 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4874 
4875 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4876 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4877 			krcp->krw_arr[i].krcp = krcp;
4878 		}
4879 
4880 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4881 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4882 		krcp->initialized = true;
4883 	}
4884 	if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
4885 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4886 }
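/*
 * Illustrative sketch (not part of the kernel build): the per-CPU batching
 * state initialized above backs the kfree_rcu() API, whose typical use looks
 * like the hypothetical example below:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void release_foo(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rh);	// Freed only after a grace period.
 *	}
 */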
4887 
4888 void __init rcu_init(void)
4889 {
4890 	int cpu = smp_processor_id();
4891 
4892 	rcu_early_boot_tests();
4893 
4894 	kfree_rcu_batch_init();
4895 	rcu_bootup_announce();
4896 	sanitize_kthread_prio();
4897 	rcu_init_geometry();
4898 	rcu_init_one();
4899 	if (dump_tree)
4900 		rcu_dump_rcu_node_tree();
4901 	if (use_softirq)
4902 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4903 
4904 	/*
4905 	 * We don't need protection against CPU-hotplug here because
4906 	 * this is called early in boot, before either interrupts
4907 	 * or the scheduler are operational.
4908 	 */
4909 	pm_notifier(rcu_pm_notify, 0);
4910 	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4911 	rcutree_prepare_cpu(cpu);
4912 	rcu_cpu_starting(cpu);
4913 	rcutree_online_cpu(cpu);
4914 
4915 	/* Create workqueue for Tree SRCU and for expedited GPs. */
4916 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4917 	WARN_ON(!rcu_gp_wq);
4918 	rcu_alloc_par_gp_wq();
4919 
4920 	/* Fill in default value for rcutree.qovld boot parameter. */
4921 	/* -After- the rcu_node ->lock fields are initialized! */
4922 	if (qovld < 0)
4923 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4924 	else
4925 		qovld_calc = qovld;
4926 
4927 	// Kick-start any polled grace periods that started early.
4928 	if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1))
4929 		(void)start_poll_synchronize_rcu_expedited();
4930 }
4931 
4932 #include "tree_stall.h"
4933 #include "tree_exp.h"
4934 #include "tree_nocb.h"
4935 #include "tree_plugin.h"
4936