// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *          Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control. Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
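/*
 * Resulting ->dynticks layout, as derived from the two definitions above
 * and their uses below: bit 0 is the special-action flag
 * (RCU_DYNTICK_CTRL_MASK), and the remaining bits form a counter that
 * rcu_dynticks_eqs_enter() and rcu_dynticks_eqs_exit() advance by
 * RCU_DYNTICK_CTRL_CTR (0x2), so bit 1 toggles on each transition.  The
 * CPU is watching RCU when (->dynticks & RCU_DYNTICK_CTRL_CTR) != 0 and
 * is in an extended quiescent state when that bit is clear.
 */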

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
static struct rcu_state rcu_state = {
        .level = { &rcu_state.node[0] },
        .gp_state = RCU_GP_IDLE,
        .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
        .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
        .name = RCU_NAME,
        .abbr = RCU_ABBR,
        .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
        .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
        .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};
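/*
 * Note on the ->gp_seq initializer above: starting the counter at
 * (0UL - 300UL) << RCU_SEQ_CTR_SHIFT leaves it only a few hundred
 * grace periods short of unsigned-long wraparound, presumably so that
 * the ULONG_CMP_*()-based wraparound handling is exercised shortly
 * after boot rather than only after weeks of uptime.
 */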

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = true;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier(). When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods. This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking. Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks. So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one. We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
                              unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This RCU parameter is read-only at runtime. It specifies the minimum
 * number of objects that can be cached per CPU. Object size is equal
 * to one page. This value can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture. */
int rcu_get_gp_kthreads_prio(void)
{
        return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay. The longer the delay, the more grace periods between each
 * delay. The reason for this normalization is that it means that, for
 * non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay. This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3   /* Number of grace periods between delays. */
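/*
 * Worked example of the normalization described above: with a delay of
 * 5 jiffies, a delay is imposed once every 5 * PER_RCU_NODE_PERIOD = 15
 * grace periods; with a delay of 10 jiffies, once every 30 grace
 * periods. Either way the average added cost is 1/3 jiffy per grace
 * period, which is the constant-slowdown property.
 */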

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
        return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress. The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
        return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

        if (rcu_segcblist_is_enabled(&rdp->cblist))
                return rcu_segcblist_n_cbs(&rdp->cblist);
        return 0;
}

void rcu_softirq_qs(void)
{
        rcu_qs();
        rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state. This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;

        /*
         * CPUs seeing atomic_add_return() must see prior RCU read-side
         * critical sections, and we also must force ordering with the
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
        seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is no longer watching. Better be in extended quiescent state!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_CTR));
        /* Better not have special action (TLB flush) pending! */
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state. This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;

        /*
         * CPUs seeing atomic_add_return() must see prior idle sojourns,
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
        seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is now watching. Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     !(seq & RCU_DYNTICK_CTRL_CTR));
        if (seq & RCU_DYNTICK_CTRL_MASK) {
                arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
                smp_mb__after_atomic(); /* _exit after clearing mask. */
        }
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
                return;
        atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
        int snap = atomic_add_return(0, &rdp->dynticks);

        return snap & ~RCU_DYNTICK_CTRL_MASK;
}
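/*
 * The atomic_add_return(0, ...) above is the usual idiom for reading an
 * atomic_t with the full ordering of a value-returning atomic RMW
 * operation: unlike a plain atomic_read(), it provides the full-barrier
 * semantics that the snapshot comparisons in rcu_dynticks_in_eqs_since()
 * below rely on.
 */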

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
        return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
        return snap != rcu_dynticks_snap(rdp);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        int snap;

        // If not quiescent, force back to earlier extended quiescent state.
        snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
                                               RCU_DYNTICK_CTRL_CTR);

        smp_rmb(); // Order ->dynticks and *vp reads.
        if (READ_ONCE(*vp))
                return false;  // Non-zero, so report failure.
        smp_rmb(); // Order *vp read and ->dynticks re-read.

        // If still in the same extended quiescent state, we are good!
        return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state. Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
        int old;
        int new;
        int new_old;
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

        new_old = atomic_read(&rdp->dynticks);
        do {
                old = new_old;
                if (old & RCU_DYNTICK_CTRL_CTR)
                        return false;
                new = old | RCU_DYNTICK_CTRL_MASK;
                new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
        } while (new_old != old);
        return true;
}
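/*
 * Illustrative (hypothetical) caller, along the lines of the TLB-flush
 * use case mentioned above: if the target CPU is in an extended
 * quiescent state, set its special bit instead of sending an IPI, and
 * let the deferred action run at the next rcu_dynticks_eqs_exit():
 *
 *      if (!rcu_eqs_special_set(cpu))
 *              smp_call_function_single(cpu, do_flush_tlb, NULL, 1);
 *
 * Here do_flush_tlb() is a made-up handler, not an API from this file.
 */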

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state. This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
        int special;

        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
        special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
                                    &this_cpu_ptr(&rcu_data)->dynticks);
        /* It is illegal to call this from idle state. */
        WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
        rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
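/*
 * Why 2 * RCU_DYNTICK_CTRL_CTR above: adding the increment twice leaves
 * the watching bit (RCU_DYNTICK_CTRL_CTR) unchanged while still moving
 * the counter past any snapshot taken by rcu_dynticks_snap(), so remote
 * CPUs comparing snapshots see the same evidence they would have seen
 * had this CPU dipped into idle and back out again.
 */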

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly, from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
        long nesting;

        /*
         * Usually called from the tick; but also used from smp_function_call()
         * for expedited grace periods. This latter can result in running from
         * the idle task, instead of an actual IPI.
         */
        lockdep_assert_irqs_disabled();

        /* Check for counter underflows */
        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
                         "RCU dynticks_nesting counter underflow!");
        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
                         "RCU dynticks_nmi_nesting counter underflow/zero!");

        /* Are we at first interrupt nesting level? */
        nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
        if (nesting > 1)
                return false;

        /*
         * If we're not in an interrupt, we must be in the idle task!
         */
        WARN_ON_ONCE(!nesting && !is_idle_task(current));

        /* Does CPU appear to be idle from an RCU standpoint? */
        return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
                                // Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;      // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
        unsigned long j;

        /* If jiffies_till_sched_qs was specified, respect the request. */
        if (jiffies_till_sched_qs != ULONG_MAX) {
                WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
                return;
        }
        /* Otherwise, set to third fqs scan, but bound below on large system. */
        j = READ_ONCE(jiffies_till_first_fqs) +
                      2 * READ_ONCE(jiffies_till_next_fqs);
        if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
                j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
        pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
        WRITE_ONCE(jiffies_to_sched_qs, j);
}
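/*
 * Worked example (assuming HZ=1000 and the usual RCU_JIFFIES_FQS_DIV of
 * 256 from tree.h): with jiffies_till_first_fqs=1 and
 * jiffies_till_next_fqs=1, the third-scan estimate is 1 + 2*1 = 3
 * jiffies, which falls below the floor of HZ/10 + nr_cpu_ids/256 = 100
 * jiffies on a 64-CPU system, so jiffies_to_sched_qs becomes 100.
 */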

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
        ulong j;
        int ret = kstrtoul(val, 0, &j);

        if (!ret) {
                WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
                adjust_jiffies_till_sched_qs();
        }
        return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
        ulong j;
        int ret = kstrtoul(val, 0, &j);

        if (!ret) {
                WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
                adjust_jiffies_till_sched_qs();
        }
        return ret;
}

static struct kernel_param_ops first_fqs_jiffies_ops = {
        .set = param_set_first_fqs_jiffies,
        .get = param_get_ulong,
};

static struct kernel_param_ops next_fqs_jiffies_ops = {
        .set = param_set_next_fqs_jiffies,
        .get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
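/*
 * Because MODULE_PARAM_PREFIX is "rcutree.", these parameters appear on
 * the kernel command line with that prefix, for example
 * "rcutree.jiffies_till_first_fqs=1", and (given the 0644 permissions)
 * are also writable at runtime via
 * /sys/module/rcutree/parameters/jiffies_till_first_fqs.
 */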

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
        return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats. Odd numbers mean that a batch is in progress, even
 * numbers mean idle. The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
        return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
        return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gp_seq)
{
        switch (test_type) {
        case RCU_FLAVOR:
                *flags = READ_ONCE(rcu_state.gp_flags);
                *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     rdp->dynticks_nesting == 0);
        if (rdp->dynticks_nesting != 1) {
                // RCU will still be watching, so just do accounting and leave.
                rdp->dynticks_nesting--;
                return;
        }

        lockdep_assert_irqs_disabled();
        instrumentation_begin();
        trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        rdp = this_cpu_ptr(&rcu_data);
        rcu_prepare_for_idle();
        rcu_preempt_deferred_qs(current);

        // instrumentation for the noinstr rcu_dynticks_eqs_enter()
        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

        instrumentation_end();
        WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
        // RCU is watching here ...
        rcu_dynticks_eqs_enter();
        // ... but is no longer watching here.
        rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur. (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
        lockdep_assert_irqs_disabled();
        rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace. No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        lockdep_assert_irqs_disabled();

        instrumentation_begin();
        do_nocb_deferred_wakeup(rdp);
        instrumentation_end();

        rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        instrumentation_begin();
        /*
         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
         * (We are exiting an NMI handler, so RCU better be paying attention
         * to us!)
         */
        WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

        /*
         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
         * leave it in non-RCU-idle state.
         */
        if (rdp->dynticks_nmi_nesting != 1) {
                trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
                                  atomic_read(&rdp->dynticks));
                WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
                           rdp->dynticks_nmi_nesting - 2);
                instrumentation_end();
                return;
        }

        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

        if (!in_nmi())
                rcu_prepare_for_idle();

        // instrumentation for the noinstr rcu_dynticks_eqs_enter()
        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
        instrumentation_end();

        // RCU is watching here ...
        rcu_dynticks_eqs_enter();
        // ... but is no longer watching here.

        if (!in_nmi())
                rcu_dynticks_task_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur. The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit(). If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard. But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
        lockdep_assert_irqs_disabled();
        rcu_nmi_exit();
}

/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *                        towards in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from RCU point of view. Invoked from return from interrupt before kernel
 * preemption.
 */
void rcu_irq_exit_preempt(void)
{
        lockdep_assert_irqs_disabled();
        rcu_nmi_exit();

        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
                         "RCU dynticks_nesting counter underflow/zero!");
        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
                         DYNTICK_IRQ_NONIDLE,
                         "Bad RCU dynticks_nmi_nesting counter\n");
        RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
                         "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
        lockdep_assert_irqs_disabled();

        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
                         "RCU dynticks_nesting counter underflow/zero!");
        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
                         DYNTICK_IRQ_NONIDLE,
                         "Bad RCU dynticks_nmi_nesting counter\n");
        RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
                         "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_irq_exit();
        local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
        struct rcu_data *rdp;
        long oldval;

        lockdep_assert_irqs_disabled();
        rdp = this_cpu_ptr(&rcu_data);
        oldval = rdp->dynticks_nesting;
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
        if (oldval) {
                // RCU was already watching, so just do accounting and leave.
                rdp->dynticks_nesting++;
                return;
        }
        rcu_dynticks_task_exit();
        // RCU is not watching here ...
        rcu_dynticks_eqs_exit();
        // ... but is watching here.
        instrumentation_begin();

        // instrumentation for the noinstr rcu_dynticks_eqs_exit()
        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        WRITE_ONCE(rdp->dynticks_nesting, 1);
        WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
        WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
        instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_eqs_exit(false);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
        rcu_eqs_exit(1);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution. After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short. Except of course when it isn't. And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception. In that case, the RCU grace-period kthread
 * will eventually cause one to happen. However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        // If we're here from NMI there's nothing to do.
        if (in_nmi())
                return;

        RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
                         "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

        if (!tick_nohz_full_cpu(rdp->cpu) ||
            !READ_ONCE(rdp->rcu_urgent_qs) ||
            READ_ONCE(rdp->rcu_forced_tick)) {
                // RCU doesn't need nohz_full help from this CPU, or it is
                // already getting that help.
                return;
        }

        // We get here only when not in an extended quiescent state and
        // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
        // already watching and (2) The fact that we are in an interrupt
        // handler and that the rcu_node lock is an irq-disabled lock
        // prevents self-deadlock. So we can safely recheck under the lock.
        // Note that the nohz_full state currently cannot change.
        raw_spin_lock_rcu_node(rdp->mynode);
        if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
                // A nohz_full CPU is in the kernel and RCU needs a
                // quiescent state. Turn on the tick!
                WRITE_ONCE(rdp->rcu_forced_tick, true);
                tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
        }
        raw_spin_unlock_rcu_node(rdp->mynode);
}
NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active. This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int. (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
        long incby = 2;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

        /* Complain about underflow. */
        WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

        /*
         * If idle from RCU viewpoint, atomically increment ->dynticks
         * to mark non-idle and increment ->dynticks_nmi_nesting by one.
         * Otherwise, increment ->dynticks_nmi_nesting by two. This means
         * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
         * to be in the outermost NMI handler that interrupted an RCU-idle
         * period (observation due to Andy Lutomirski).
         */
        if (rcu_dynticks_curr_cpu_in_eqs()) {

                if (!in_nmi())
                        rcu_dynticks_task_exit();

                // RCU is not watching here ...
                rcu_dynticks_eqs_exit();
                // ... but is watching here.

                if (!in_nmi()) {
                        instrumentation_begin();
                        rcu_cleanup_after_idle();
                        instrumentation_end();
                }

                instrumentation_begin();
                // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
                instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
                // instrumentation for the noinstr rcu_dynticks_eqs_exit()
                instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

                incby = 1;
        } else if (!in_nmi()) {
                instrumentation_begin();
                rcu_irq_enter_check_tick();
        } else {
                instrumentation_begin();
        }

        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                          rdp->dynticks_nmi_nesting,
                          rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
        instrumentation_end();
        WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
                   rdp->dynticks_nmi_nesting + incby);
        barrier();
}
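/*
 * Worked example of the ->dynticks_nmi_nesting arithmetic: an NMI that
 * interrupts an RCU-idle CPU takes the incby = 1 path, moving the count
 * 0 -> 1; a nested NMI then adds 2, giving 3; the matching rcu_nmi_exit()
 * calls subtract 2 (3 -> 1) and finally reset to 0, so a count of exactly
 * 1 always identifies the outermost handler that interrupted idle.
 */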

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur. The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
        lockdep_assert_irqs_disabled();
        rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_irq_enter();
        local_irq_restore(flags);
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
        raw_lockdep_assert_held_rcu_node(rdp->mynode);
        WRITE_ONCE(rdp->rcu_urgent_qs, false);
        WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
        if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
                tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
                WRITE_ONCE(rdp->rcu_forced_tick, false);
        }
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections. In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
        bool ret;

        preempt_disable_notrace();
        ret = !rcu_dynticks_curr_cpu_in_eqs();
        preempt_enable_notrace();
        return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU. This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU. Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
        int cpu;

        barrier();
        cpu = task_cpu(t);
        if (!task_curr(t))
                return; /* This task is not running on that CPU. */
        smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway. In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        bool ret = false;

        if (in_nmi() || !rcu_scheduler_fully_active)
                return true;
        preempt_disable_notrace();
        rdp = this_cpu_ptr(&rcu_data);
        rnp = rdp->mynode;
        if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
                ret = true;
        preempt_enable_notrace();
        return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
                         rnp->gp_seq))
                WRITE_ONCE(rdp->gpwrap, true);
        if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
                rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
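/*
 * In other words: ULONG_CMP_LT() does wrap-safe modular comparison, so
 * the first test above fires only when this CPU's rcu_data ->gp_seq has
 * fallen more than ULONG_MAX / 4 sequence numbers behind the rcu_node
 * structure's ->gp_seq, at which point ->gpwrap is set so that the
 * stale per-CPU counters are known to need resynchronization.
 */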
1186
1187 /*
1188 * Snapshot the specified CPU's dynticks counter so that we can later
1189 * credit them with an implicit quiescent state. Return 1 if this CPU
1190 * is in dynticks idle mode, which is an extended quiescent state.
1191 */
dyntick_save_progress_counter(struct rcu_data * rdp)1192 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1193 {
1194 rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1195 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1196 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1197 rcu_gpnum_ovf(rdp->mynode, rdp);
1198 return 1;
1199 }
1200 return 0;
1201 }
1202
1203 /*
1204 * Return true if the specified CPU has passed through a quiescent
1205 * state by virtue of being in or having passed through an dynticks
1206 * idle state since the last call to dyntick_save_progress_counter()
1207 * for this same CPU, or by virtue of having been offline.
1208 */
rcu_implicit_dynticks_qs(struct rcu_data * rdp)1209 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1210 {
1211 unsigned long jtsq;
1212 bool *rnhqp;
1213 bool *ruqp;
1214 struct rcu_node *rnp = rdp->mynode;
1215
1216 /*
1217 * If the CPU passed through or entered a dynticks idle phase with
1218 * no active irq/NMI handlers, then we can safely pretend that the CPU
1219 * already acknowledged the request to pass through a quiescent
1220 * state. Either way, that CPU cannot possibly be in an RCU
1221 * read-side critical section that started before the beginning
1222 * of the current RCU grace period.
1223 */
1224 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1225 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1226 rcu_gpnum_ovf(rnp, rdp);
1227 return 1;
1228 }
1229
1230 /*
1231 * Complain if a CPU that is considered to be offline from RCU's
1232 * perspective has not yet reported a quiescent state. After all,
1233 * the offline CPU should have reported a quiescent state during
1234 * the CPU-offline process, or, failing that, by rcu_gp_init()
1235 * if it ran concurrently with either the CPU going offline or the
1236 * last task on a leaf rcu_node structure exiting its RCU read-side
1237 * critical section while all CPUs corresponding to that structure
1238 * are offline. This added warning detects bugs in any of these
1239 * code paths.
1240 *
1241 * The rcu_node structure's ->lock is held here, which excludes
1242 * the relevant portions the CPU-hotplug code, the grace-period
1243 * initialization code, and the rcu_read_unlock() code paths.
1244 *
1245 * For more detail, please refer to the "Hotplug CPU" section
1246 * of RCU's Requirements documentation.
1247 */
1248 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1249 bool onl;
1250 struct rcu_node *rnp1;
1251
1252 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1253 __func__, rnp->grplo, rnp->grphi, rnp->level,
1254 (long)rnp->gp_seq, (long)rnp->completedqs);
1255 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1256 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1257 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1258 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1259 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1260 __func__, rdp->cpu, ".o"[onl],
1261 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1262 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1263 return 1; /* Break things loose after complaining. */
1264 }
1265
1266 /*
1267 * A CPU running for an extended time within the kernel can
1268 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1269 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1270 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1271 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1272 * variable are safe because the assignments are repeated if this
1273 * CPU failed to pass through a quiescent state. This code
1274 * also checks .jiffies_resched in case jiffies_to_sched_qs
1275 * is set way high.
1276 */
1277 jtsq = READ_ONCE(jiffies_to_sched_qs);
1278 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1279 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1280 if (!READ_ONCE(*rnhqp) &&
1281 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1282 time_after(jiffies, rcu_state.jiffies_resched) ||
1283 rcu_state.cbovld)) {
1284 WRITE_ONCE(*rnhqp, true);
1285 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1286 smp_store_release(ruqp, true);
1287 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1288 WRITE_ONCE(*ruqp, true);
1289 }
1290
1291 /*
1292 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1293 * The above code handles this, but only for straight cond_resched().
1294 * And some in-kernel loops check need_resched() before calling
1295 * cond_resched(), which defeats the above code for CPUs that are
1296 * running in-kernel with scheduling-clock interrupts disabled.
1297 * So hit them over the head with the resched_cpu() hammer!
1298 */
1299 if (tick_nohz_full_cpu(rdp->cpu) &&
1300 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1301 rcu_state.cbovld)) {
1302 WRITE_ONCE(*ruqp, true);
1303 resched_cpu(rdp->cpu);
1304 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1305 }
1306
1307 /*
1308 * If more than halfway to RCU CPU stall-warning time, invoke
1309 * resched_cpu() more frequently to try to loosen things up a bit.
1310 * Also check to see if the CPU is getting hammered with interrupts,
1311 * but only once per grace period, just to keep the IPIs down to
1312 * a dull roar.
1313 */
1314 if (time_after(jiffies, rcu_state.jiffies_resched)) {
1315 if (time_after(jiffies,
1316 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1317 resched_cpu(rdp->cpu);
1318 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1319 }
1320 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1321 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1322 (rnp->ffmask & rdp->grpmask)) {
1323 init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1324 atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
1325 rdp->rcu_iw_pending = true;
1326 rdp->rcu_iw_gp_seq = rnp->gp_seq;
1327 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1328 }
1329 }
1330
1331 return 0;
1332 }
1333
1334 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
trace_rcu_this_gp(struct rcu_node * rnp,struct rcu_data * rdp,unsigned long gp_seq_req,const char * s)1335 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1336 unsigned long gp_seq_req, const char *s)
1337 {
1338 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1339 gp_seq_req, rnp->level,
1340 rnp->grplo, rnp->grphi, s);
1341 }
1342
1343 /*
1344 * rcu_start_this_gp - Request the start of a particular grace period
1345 * @rnp_start: The leaf node of the CPU from which to start.
1346 * @rdp: The rcu_data corresponding to the CPU from which to start.
1347 * @gp_seq_req: The gp_seq of the grace period to start.
1348 *
1349 * Start the specified grace period, as needed to handle newly arrived
1350 * callbacks. The required future grace periods are recorded in each
1351 * rcu_node structure's ->gp_seq_needed field. Returns true if there
1352 * is reason to awaken the grace-period kthread.
1353 *
1354 * The caller must hold the specified rcu_node structure's ->lock, which
1355 * is why the caller is responsible for waking the grace-period kthread.
1356 *
1357 * Returns true if the GP thread needs to be awakened else false.
1358 */
rcu_start_this_gp(struct rcu_node * rnp_start,struct rcu_data * rdp,unsigned long gp_seq_req)1359 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1360 unsigned long gp_seq_req)
1361 {
1362 bool ret = false;
1363 struct rcu_node *rnp;
1364
1365 /*
1366 * Use funnel locking to either acquire the root rcu_node
1367 * structure's lock or bail out if the need for this grace period
1368 * has already been recorded -- or if that grace period has in
1369 * fact already started. If there is already a grace period in
1370 * progress in a non-leaf node, no recording is needed because the
1371 * end of the grace period will scan the leaf rcu_node structures.
1372 * Note that rnp_start->lock must not be released.
1373 */
1374 raw_lockdep_assert_held_rcu_node(rnp_start);
1375 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1376 for (rnp = rnp_start; 1; rnp = rnp->parent) {
1377 if (rnp != rnp_start)
1378 raw_spin_lock_rcu_node(rnp);
1379 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1380 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1381 (rnp != rnp_start &&
1382 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1383 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1384 TPS("Prestarted"));
1385 goto unlock_out;
1386 }
1387 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1388 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1389 /*
1390 * We just marked the leaf or internal node, and a
1391 * grace period is in progress, which means that
1392 * rcu_gp_cleanup() will see the marking. Bail to
1393 * reduce contention.
1394 */
1395 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1396 TPS("Startedleaf"));
1397 goto unlock_out;
1398 }
1399 if (rnp != rnp_start && rnp->parent != NULL)
1400 raw_spin_unlock_rcu_node(rnp);
1401 if (!rnp->parent)
1402 break; /* At root, and perhaps also leaf. */
1403 }
1404
1405 /* If GP already in progress, just leave, otherwise start one. */
1406 if (rcu_gp_in_progress()) {
1407 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1408 goto unlock_out;
1409 }
1410 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1411 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1412 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1413 if (!READ_ONCE(rcu_state.gp_kthread)) {
1414 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1415 goto unlock_out;
1416 }
1417 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1418 ret = true; /* Caller must wake GP kthread. */
1419 unlock_out:
1420 /* Push furthest requested GP to leaf node and rcu_data structure. */
1421 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1422 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1423 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1424 }
1425 if (rnp != rnp_start)
1426 raw_spin_unlock_rcu_node(rnp);
1427 return ret;
1428 }
1429
1430 /*
1431 * Clean up any old requests for the just-ended grace period. Also return
1432 * whether any additional grace periods have been requested.
1433 */
rcu_future_gp_cleanup(struct rcu_node * rnp)1434 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1435 {
1436 bool needmore;
1437 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1438
1439 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1440 if (!needmore)
1441 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1442 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1443 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1444 return needmore;
1445 }
1446
1447 /*
1448 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1449 * interrupt or softirq handler, in which case we just might immediately
1450 * sleep upon return, resulting in a grace-period hang), and don't bother
1451 * awakening when there is nothing for the grace-period kthread to do
1452 * (as in several CPUs raced to awaken, we lost), and finally don't try
1453 * to awaken a kthread that has not yet been created. If all those checks
1454 * are passed, track some debug information and awaken.
1455 *
1456 * So why do the self-wakeup when in an interrupt or softirq handler
1457 * in the grace-period kthread's context? Because the kthread might have
1458 * been interrupted just as it was going to sleep, and just after the final
1459 * pre-sleep check of the awaken condition. In this case, a wakeup really
1460 * is required, and is therefore supplied.
1461 */
rcu_gp_kthread_wake(void)1462 static void rcu_gp_kthread_wake(void)
1463 {
1464 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1465
1466 if ((current == t && !in_irq() && !in_serving_softirq()) ||
1467 !READ_ONCE(rcu_state.gp_flags) || !t)
1468 return;
1469 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1470 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1471 swake_up_one(&rcu_state.gp_wq);
1472 }
1473
1474 /*
1475 * If there is room, assign a ->gp_seq number to any callbacks on this
1476 * CPU that have not already been assigned. Also accelerate any callbacks
1477 * that were previously assigned a ->gp_seq number that has since proven
1478 * to be too conservative, which can happen if callbacks get assigned a
1479 * ->gp_seq number while RCU is idle, but with reference to a non-root
1480 * rcu_node structure. This function is idempotent, so it does not hurt
1481 * to call it repeatedly. Returns a flag saying that we should awaken
1482 * the RCU grace-period kthread.
1483 *
1484 * The caller must hold rnp->lock with interrupts disabled.
1485 */
1486 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1487 {
1488 unsigned long gp_seq_req;
1489 bool ret = false;
1490
1491 rcu_lockdep_assert_cblist_protected(rdp);
1492 raw_lockdep_assert_held_rcu_node(rnp);
1493
1494 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1495 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1496 return false;
1497
1498 /*
1499 * Callbacks are often registered with incomplete grace-period
1500 * information. Something about the fact that getting exact
1501 * information requires acquiring a global lock... RCU therefore
1502 * makes a conservative estimate of the grace period number at which
1503 * a given callback will become ready to invoke. The following
1504 * code checks this estimate and improves it when possible, thus
1505 * accelerating callback invocation to an earlier grace-period
1506 * number.
1507 */
1508 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1509 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1510 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1511
1512 /* Trace depending on how much we were able to accelerate. */
1513 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1514 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1515 else
1516 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1517
1518 return ret;
1519 }
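/*
 * Editorial worked example (assuming the rcu_seq encoding in
 * kernel/rcu/rcu.h, which keeps the grace-period counter in the upper
 * bits and two state bits at the bottom): if RCU is idle with
 * rcu_state.gp_seq == 0x20 (counter 8, state 0), rcu_seq_snap() above
 * returns 0x24 (counter 9), so the accelerated callbacks become ready
 * once grace period 9 completes. If a grace period is instead already
 * in progress (gp_seq == 0x21, state 1), rcu_seq_snap() returns 0x28
 * (counter 10), because the in-flight grace period might not wait for
 * the newly accelerated callbacks.
 */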
1520
1521 /*
1522 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1523 * rcu_node structure's ->lock be held. It consults the cached value
1524 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1525 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1526 * while holding the leaf rcu_node structure's ->lock.
1527 */
1528 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1529 struct rcu_data *rdp)
1530 {
1531 unsigned long c;
1532 bool needwake;
1533
1534 rcu_lockdep_assert_cblist_protected(rdp);
1535 c = rcu_seq_snap(&rcu_state.gp_seq);
1536 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1537 /* Old request still live, so mark recent callbacks. */
1538 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1539 return;
1540 }
1541 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1542 needwake = rcu_accelerate_cbs(rnp, rdp);
1543 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1544 if (needwake)
1545 rcu_gp_kthread_wake();
1546 }
1547
1548 /*
1549 * Move any callbacks whose grace period has completed to the
1550 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1551 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1552 * sublist. This function is idempotent, so it does not hurt to
1553 * invoke it repeatedly. As long as it is not invoked -too- often...
1554 * Returns true if the RCU grace-period kthread needs to be awakened.
1555 *
1556 * The caller must hold rnp->lock with interrupts disabled.
1557 */
1558 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1559 {
1560 rcu_lockdep_assert_cblist_protected(rdp);
1561 raw_lockdep_assert_held_rcu_node(rnp);
1562
1563 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1564 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1565 return false;
1566
1567 /*
1568 * Find all callbacks whose ->gp_seq numbers indicate that they
1569 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1570 */
1571 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1572
1573 /* Classify any remaining callbacks. */
1574 return rcu_accelerate_cbs(rnp, rdp);
1575 }
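/*
 * Editorial sketch of the segmented callback list manipulated by
 * rcu_advance_cbs() and rcu_accelerate_cbs() (see rcu_segcblist.h for
 * the authoritative definitions):
 *
 *	RCU_DONE_TAIL:       ready to invoke
 *	RCU_WAIT_TAIL:       waiting on the current grace period
 *	RCU_NEXT_READY_TAIL: waiting on a later grace period
 *	RCU_NEXT_TAIL:       not yet associated with any grace period
 *
 * Advancing moves callbacks toward RCU_DONE_TAIL as grace periods
 * complete, while accelerating assigns ->gp_seq numbers to callbacks
 * near RCU_NEXT_TAIL so that they too can later be advanced.
 */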
1576
1577 /*
1578 * Move and classify callbacks, but only if doing so won't require
1579 * that the RCU grace-period kthread be awakened.
1580 */
1581 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1582 struct rcu_data *rdp)
1583 {
1584 rcu_lockdep_assert_cblist_protected(rdp);
1585 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1586 return;
1587 // The grace period cannot end while we hold the rcu_node lock.
1588 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1589 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1590 raw_spin_unlock_rcu_node(rnp);
1591 }
1592
1593 /*
1594 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1595 * quiescent state. This is intended to be invoked when the CPU notices
1596 * a new grace period.
1597 */
1598 static void rcu_strict_gp_check_qs(void)
1599 {
1600 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1601 rcu_read_lock();
1602 rcu_read_unlock();
1603 }
1604 }
1605
1606 /*
1607 * Update CPU-local rcu_data state to record the beginnings and ends of
1608 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1609 * structure corresponding to the current CPU, and must have irqs disabled.
1610 * Returns true if the grace-period kthread needs to be awakened.
1611 */
1612 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1613 {
1614 bool ret = false;
1615 bool need_qs;
1616 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1617 rcu_segcblist_is_offloaded(&rdp->cblist);
1618
1619 raw_lockdep_assert_held_rcu_node(rnp);
1620
1621 if (rdp->gp_seq == rnp->gp_seq)
1622 return false; /* Nothing to do. */
1623
1624 /* Handle the ends of any preceding grace periods first. */
1625 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1626 unlikely(READ_ONCE(rdp->gpwrap))) {
1627 if (!offloaded)
1628 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1629 rdp->core_needs_qs = false;
1630 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1631 } else {
1632 if (!offloaded)
1633 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1634 if (rdp->core_needs_qs)
1635 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1636 }
1637
1638 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1639 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1640 unlikely(READ_ONCE(rdp->gpwrap))) {
1641 /*
1642 * If the current grace period is waiting for this CPU,
1643 * set up to detect a quiescent state, otherwise don't
1644 * go looking for one.
1645 */
1646 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1647 need_qs = !!(rnp->qsmask & rdp->grpmask);
1648 rdp->cpu_no_qs.b.norm = need_qs;
1649 rdp->core_needs_qs = need_qs;
1650 zero_cpu_stall_ticks(rdp);
1651 }
1652 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1653 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1654 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1655 WRITE_ONCE(rdp->gpwrap, false);
1656 rcu_gpnum_ovf(rnp, rdp);
1657 return ret;
1658 }
1659
1660 static void note_gp_changes(struct rcu_data *rdp)
1661 {
1662 unsigned long flags;
1663 bool needwake;
1664 struct rcu_node *rnp;
1665
1666 local_irq_save(flags);
1667 rnp = rdp->mynode;
1668 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1669 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1670 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1671 local_irq_restore(flags);
1672 return;
1673 }
1674 needwake = __note_gp_changes(rnp, rdp);
1675 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1676 rcu_strict_gp_check_qs();
1677 if (needwake)
1678 rcu_gp_kthread_wake();
1679 }
1680
1681 static void rcu_gp_slow(int delay)
1682 {
1683 if (delay > 0 &&
1684 !(rcu_seq_ctr(rcu_state.gp_seq) %
1685 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1686 schedule_timeout_idle(delay);
1687 }
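/*
 * Editorial example, assuming tree.h's definition of PER_RCU_NODE_PERIOD:
 * with delay == 1, the modulus test above permits the sleep on only one
 * grace period out of every rcu_num_nodes * PER_RCU_NODE_PERIOD, so the
 * debug delay is sampled occasionally rather than imposed on every
 * grace period.
 */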
1688
1689 static unsigned long sleep_duration;
1690
1691 /* Allow rcutorture to stall the grace-period kthread. */
1692 void rcu_gp_set_torture_wait(int duration)
1693 {
1694 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1695 WRITE_ONCE(sleep_duration, duration);
1696 }
1697 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1698
1699 /* Actually implement the aforementioned wait. */
1700 static void rcu_gp_torture_wait(void)
1701 {
1702 unsigned long duration;
1703
1704 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1705 return;
1706 duration = xchg(&sleep_duration, 0UL);
1707 if (duration > 0) {
1708 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1709 schedule_timeout_idle(duration);
1710 pr_alert("%s: Wait complete\n", __func__);
1711 }
1712 }
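/*
 * Editorial note: the intended caller of rcu_gp_set_torture_wait() is
 * rcutorture, which (in kernels providing the parameter) translates its
 * stall_gp_kthread module parameter into something like:
 *
 *	rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
 *
 * The stall then takes effect at whichever rcu_gp_torture_wait() call
 * site the grace-period kthread reaches next.
 */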
1713
1714 /*
1715 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1716 * processing.
1717 */
1718 static void rcu_strict_gp_boundary(void *unused)
1719 {
1720 invoke_rcu_core();
1721 }
1722
1723 /*
1724 * Initialize a new grace period. Return false if no grace period required.
1725 */
1726 static bool rcu_gp_init(void)
1727 {
1728 unsigned long firstseq;
1729 unsigned long flags;
1730 unsigned long oldmask;
1731 unsigned long mask;
1732 struct rcu_data *rdp;
1733 struct rcu_node *rnp = rcu_get_root();
1734
1735 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1736 raw_spin_lock_irq_rcu_node(rnp);
1737 if (!READ_ONCE(rcu_state.gp_flags)) {
1738 /* Spurious wakeup, tell caller to go back to sleep. */
1739 raw_spin_unlock_irq_rcu_node(rnp);
1740 return false;
1741 }
1742 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1743
1744 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1745 /*
1746 * Grace period already in progress, don't start another.
1747 * Not supposed to be able to happen.
1748 */
1749 raw_spin_unlock_irq_rcu_node(rnp);
1750 return false;
1751 }
1752
1753 /* Advance to a new grace period and initialize state. */
1754 record_gp_stall_check_time();
1755 /* Record GP times before starting GP, hence rcu_seq_start(). */
1756 rcu_seq_start(&rcu_state.gp_seq);
1757 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1758 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1759 raw_spin_unlock_irq_rcu_node(rnp);
1760
1761 /*
1762 * Apply per-leaf buffered online and offline operations to
1763 * the rcu_node tree. Note that this new grace period need not
1764 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1765 * offlining path, when combined with checks in this function,
1766 * will handle CPUs that are currently going offline or that will
1767 * go offline later. Please also refer to "Hotplug CPU" section
1768 * of RCU's Requirements documentation.
1769 */
1770 rcu_state.gp_state = RCU_GP_ONOFF;
1771 rcu_for_each_leaf_node(rnp) {
1772 smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1773 firstseq = READ_ONCE(rnp->ofl_seq);
1774 if (firstseq & 0x1)
1775 while (firstseq == READ_ONCE(rnp->ofl_seq))
1776 schedule_timeout_idle(1); // Can't wake unless RCU is watching.
1777 smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1778 raw_spin_lock(&rcu_state.ofl_lock);
1779 raw_spin_lock_irq_rcu_node(rnp);
1780 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1781 !rnp->wait_blkd_tasks) {
1782 /* Nothing to do on this leaf rcu_node structure. */
1783 raw_spin_unlock_irq_rcu_node(rnp);
1784 raw_spin_unlock(&rcu_state.ofl_lock);
1785 continue;
1786 }
1787
1788 /* Record old state, apply changes to ->qsmaskinit field. */
1789 oldmask = rnp->qsmaskinit;
1790 rnp->qsmaskinit = rnp->qsmaskinitnext;
1791
1792 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1793 if (!oldmask != !rnp->qsmaskinit) {
1794 if (!oldmask) { /* First online CPU for rcu_node. */
1795 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1796 rcu_init_new_rnp(rnp);
1797 } else if (rcu_preempt_has_tasks(rnp)) {
1798 rnp->wait_blkd_tasks = true; /* blocked tasks */
1799 } else { /* Last offline CPU and can propagate. */
1800 rcu_cleanup_dead_rnp(rnp);
1801 }
1802 }
1803
1804 /*
1805 * If all waited-on tasks from prior grace period are
1806 * done, and if all this rcu_node structure's CPUs are
1807 * still offline, propagate up the rcu_node tree and
1808 * clear ->wait_blkd_tasks. Otherwise, if one of this
1809 * rcu_node structure's CPUs has since come back online,
1810 * simply clear ->wait_blkd_tasks.
1811 */
1812 if (rnp->wait_blkd_tasks &&
1813 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1814 rnp->wait_blkd_tasks = false;
1815 if (!rnp->qsmaskinit)
1816 rcu_cleanup_dead_rnp(rnp);
1817 }
1818
1819 raw_spin_unlock_irq_rcu_node(rnp);
1820 raw_spin_unlock(&rcu_state.ofl_lock);
1821 }
1822 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1823
1824 /*
1825 * Set the quiescent-state-needed bits in all the rcu_node
1826 * structures for all currently online CPUs in breadth-first
1827 * order, starting from the root rcu_node structure, relying on the
1828 * layout of the tree within the rcu_state.node[] array. Note that
1829 * other CPUs will access only the leaves of the hierarchy, thus
1830 * seeing that no grace period is in progress, at least until the
1831 * corresponding leaf node has been initialized.
1832 *
1833 * The grace period cannot complete until the initialization
1834 * process finishes, because this kthread handles both.
1835 */
1836 rcu_state.gp_state = RCU_GP_INIT;
1837 rcu_for_each_node_breadth_first(rnp) {
1838 rcu_gp_slow(gp_init_delay);
1839 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1840 rdp = this_cpu_ptr(&rcu_data);
1841 rcu_preempt_check_blocked_tasks(rnp);
1842 rnp->qsmask = rnp->qsmaskinit;
1843 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1844 if (rnp == rdp->mynode)
1845 (void)__note_gp_changes(rnp, rdp);
1846 rcu_preempt_boost_start_gp(rnp);
1847 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1848 rnp->level, rnp->grplo,
1849 rnp->grphi, rnp->qsmask);
1850 /* Quiescent states for tasks on any now-offline CPUs. */
1851 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1852 rnp->rcu_gp_init_mask = mask;
1853 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1854 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1855 else
1856 raw_spin_unlock_irq_rcu_node(rnp);
1857 cond_resched_tasks_rcu_qs();
1858 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1859 }
1860
1861 // If strict, make all CPUs aware of new grace period.
1862 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1863 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1864
1865 return true;
1866 }
1867
1868 /*
1869 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1870 * time.
1871 */
1872 static bool rcu_gp_fqs_check_wake(int *gfp)
1873 {
1874 struct rcu_node *rnp = rcu_get_root();
1875
1876 // If under overload conditions, force an immediate FQS scan.
1877 if (*gfp & RCU_GP_FLAG_OVLD)
1878 return true;
1879
1880 // Someone like call_rcu() requested a force-quiescent-state scan.
1881 *gfp = READ_ONCE(rcu_state.gp_flags);
1882 if (*gfp & RCU_GP_FLAG_FQS)
1883 return true;
1884
1885 // The current grace period has completed.
1886 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1887 return true;
1888
1889 return false;
1890 }
1891
1892 /*
1893 * Do one round of quiescent-state forcing.
1894 */
1895 static void rcu_gp_fqs(bool first_time)
1896 {
1897 struct rcu_node *rnp = rcu_get_root();
1898
1899 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1900 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1901 if (first_time) {
1902 /* Collect dyntick-idle snapshots. */
1903 force_qs_rnp(dyntick_save_progress_counter);
1904 } else {
1905 /* Handle dyntick-idle and offline CPUs. */
1906 force_qs_rnp(rcu_implicit_dynticks_qs);
1907 }
1908 /* Clear flag to prevent immediate re-entry. */
1909 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1910 raw_spin_lock_irq_rcu_node(rnp);
1911 WRITE_ONCE(rcu_state.gp_flags,
1912 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1913 raw_spin_unlock_irq_rcu_node(rnp);
1914 }
1915 }
1916
1917 /*
1918 * Loop doing repeated quiescent-state forcing until the grace period ends.
1919 */
1920 static void rcu_gp_fqs_loop(void)
1921 {
1922 bool first_gp_fqs;
1923 int gf = 0;
1924 unsigned long j;
1925 int ret;
1926 struct rcu_node *rnp = rcu_get_root();
1927
1928 first_gp_fqs = true;
1929 j = READ_ONCE(jiffies_till_first_fqs);
1930 if (rcu_state.cbovld)
1931 gf = RCU_GP_FLAG_OVLD;
1932 ret = 0;
1933 for (;;) {
1934 if (!ret) {
1935 rcu_state.jiffies_force_qs = jiffies + j;
1936 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1937 jiffies + (j ? 3 * j : 2));
1938 }
1939 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1940 TPS("fqswait"));
1941 rcu_state.gp_state = RCU_GP_WAIT_FQS;
1942 ret = swait_event_idle_timeout_exclusive(
1943 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1944 rcu_gp_torture_wait();
1945 rcu_state.gp_state = RCU_GP_DOING_FQS;
1946 /* Locking provides needed memory barriers. */
1947 /* If grace period done, leave loop. */
1948 if (!READ_ONCE(rnp->qsmask) &&
1949 !rcu_preempt_blocked_readers_cgp(rnp))
1950 break;
1951 /* If time for quiescent-state forcing, do it. */
1952 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1953 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1954 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1955 TPS("fqsstart"));
1956 rcu_gp_fqs(first_gp_fqs);
1957 gf = 0;
1958 if (first_gp_fqs) {
1959 first_gp_fqs = false;
1960 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1961 }
1962 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1963 TPS("fqsend"));
1964 cond_resched_tasks_rcu_qs();
1965 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1966 ret = 0; /* Force full wait till next FQS. */
1967 j = READ_ONCE(jiffies_till_next_fqs);
1968 } else {
1969 /* Deal with stray signal. */
1970 cond_resched_tasks_rcu_qs();
1971 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1972 WARN_ON(signal_pending(current));
1973 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1974 TPS("fqswaitsig"));
1975 ret = 1; /* Keep old FQS timing. */
1976 j = jiffies;
1977 if (time_after(jiffies, rcu_state.jiffies_force_qs))
1978 j = 1;
1979 else
1980 j = rcu_state.jiffies_force_qs - j;
1981 gf = 0;
1982 }
1983 }
1984 }
1985
1986 /*
1987 * Clean up after the old grace period.
1988 */
1989 static void rcu_gp_cleanup(void)
1990 {
1991 int cpu;
1992 bool needgp = false;
1993 unsigned long gp_duration;
1994 unsigned long new_gp_seq;
1995 bool offloaded;
1996 struct rcu_data *rdp;
1997 struct rcu_node *rnp = rcu_get_root();
1998 struct swait_queue_head *sq;
1999
2000 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2001 raw_spin_lock_irq_rcu_node(rnp);
2002 rcu_state.gp_end = jiffies;
2003 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2004 if (gp_duration > rcu_state.gp_max)
2005 rcu_state.gp_max = gp_duration;
2006
2007 /*
2008 * We know the grace period is complete, but to everyone else
2009 * it appears to still be ongoing. But it is also the case
2010 * that to everyone else it looks like there is nothing that
2011 * they can do to advance the grace period. It is therefore
2012 * safe for us to drop the lock in order to mark the grace
2013 * period as completed in all of the rcu_node structures.
2014 */
2015 raw_spin_unlock_irq_rcu_node(rnp);
2016
2017 /*
2018 * Propagate new ->gp_seq value to rcu_node structures so that
2019 * other CPUs don't have to wait until the start of the next grace
2020 * period to process their callbacks. This also avoids some nasty
2021 * RCU grace-period initialization races by forcing the end of
2022 * the current grace period to be completely recorded in all of
2023 * the rcu_node structures before the beginning of the next grace
2024 * period is recorded in any of the rcu_node structures.
2025 */
2026 new_gp_seq = rcu_state.gp_seq;
2027 rcu_seq_end(&new_gp_seq);
2028 rcu_for_each_node_breadth_first(rnp) {
2029 raw_spin_lock_irq_rcu_node(rnp);
2030 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2031 dump_blkd_tasks(rnp, 10);
2032 WARN_ON_ONCE(rnp->qsmask);
2033 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2034 rdp = this_cpu_ptr(&rcu_data);
2035 if (rnp == rdp->mynode)
2036 needgp = __note_gp_changes(rnp, rdp) || needgp;
2037 /* smp_mb() provided by prior unlock-lock pair. */
2038 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2039 // Reset overload indication for CPUs no longer overloaded
2040 if (rcu_is_leaf_node(rnp))
2041 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2042 rdp = per_cpu_ptr(&rcu_data, cpu);
2043 check_cb_ovld_locked(rdp, rnp);
2044 }
2045 sq = rcu_nocb_gp_get(rnp);
2046 raw_spin_unlock_irq_rcu_node(rnp);
2047 rcu_nocb_gp_cleanup(sq);
2048 cond_resched_tasks_rcu_qs();
2049 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2050 rcu_gp_slow(gp_cleanup_delay);
2051 }
2052 rnp = rcu_get_root();
2053 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2054
2055 /* Declare grace period done, trace first to use old GP number. */
2056 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2057 rcu_seq_end(&rcu_state.gp_seq);
2058 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2059 rcu_state.gp_state = RCU_GP_IDLE;
2060 /* Check for GP requests since above loop. */
2061 rdp = this_cpu_ptr(&rcu_data);
2062 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2063 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2064 TPS("CleanupMore"));
2065 needgp = true;
2066 }
2067 /* Advance CBs to reduce false positives below. */
2068 offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2069 rcu_segcblist_is_offloaded(&rdp->cblist);
2070 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2071 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2072 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2073 trace_rcu_grace_period(rcu_state.name,
2074 rcu_state.gp_seq,
2075 TPS("newreq"));
2076 } else {
2077 WRITE_ONCE(rcu_state.gp_flags,
2078 rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2079 }
2080 raw_spin_unlock_irq_rcu_node(rnp);
2081
2082 // If strict, make all CPUs aware of the end of the old grace period.
2083 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2084 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2085 }
2086
2087 /*
2088 * Body of kthread that handles grace periods.
2089 */
2090 static int __noreturn rcu_gp_kthread(void *unused)
2091 {
2092 rcu_bind_gp_kthread();
2093 for (;;) {
2094
2095 /* Handle grace-period start. */
2096 for (;;) {
2097 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2098 TPS("reqwait"));
2099 rcu_state.gp_state = RCU_GP_WAIT_GPS;
2100 swait_event_idle_exclusive(rcu_state.gp_wq,
2101 READ_ONCE(rcu_state.gp_flags) &
2102 RCU_GP_FLAG_INIT);
2103 rcu_gp_torture_wait();
2104 rcu_state.gp_state = RCU_GP_DONE_GPS;
2105 /* Locking provides needed memory barrier. */
2106 if (rcu_gp_init())
2107 break;
2108 cond_resched_tasks_rcu_qs();
2109 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2110 WARN_ON(signal_pending(current));
2111 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2112 TPS("reqwaitsig"));
2113 }
2114
2115 /* Handle quiescent-state forcing. */
2116 rcu_gp_fqs_loop();
2117
2118 /* Handle grace-period end. */
2119 rcu_state.gp_state = RCU_GP_CLEANUP;
2120 rcu_gp_cleanup();
2121 rcu_state.gp_state = RCU_GP_CLEANED;
2122 }
2123 }
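/*
 * Editorial summary of the grace-period kthread's state machine, as
 * driven by the loop above together with rcu_gp_init(),
 * rcu_gp_fqs_loop(), and rcu_gp_cleanup():
 *
 *	RCU_GP_WAIT_GPS  -> RCU_GP_DONE_GPS  (woken by a new GP request)
 *	RCU_GP_DONE_GPS  -> RCU_GP_ONOFF     (rcu_gp_init() hotplug pass)
 *	RCU_GP_ONOFF     -> RCU_GP_INIT      (breadth-first ->qsmask setup)
 *	RCU_GP_INIT      -> RCU_GP_WAIT_FQS  (enter rcu_gp_fqs_loop())
 *	RCU_GP_WAIT_FQS <-> RCU_GP_DOING_FQS (until all QSes reported)
 *	RCU_GP_DOING_FQS -> RCU_GP_CLEANUP   (rcu_gp_cleanup(), which also
 *	RCU_GP_CLEANUP   -> RCU_GP_CLEANED    passes through RCU_GP_IDLE)
 */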
2124
2125 /*
2126 * Report a full set of quiescent states to the rcu_state data structure.
2127 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2128 * another grace period is required. Whether we wake the grace-period
2129 * kthread or it awakens itself for the next round of quiescent-state
2130 * forcing, that kthread will clean up after the just-completed grace
2131 * period. Note that the caller must hold rnp->lock, which is released
2132 * before return.
2133 */
2134 static void rcu_report_qs_rsp(unsigned long flags)
2135 __releases(rcu_get_root()->lock)
2136 {
2137 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2138 WARN_ON_ONCE(!rcu_gp_in_progress());
2139 WRITE_ONCE(rcu_state.gp_flags,
2140 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2141 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2142 rcu_gp_kthread_wake();
2143 }
2144
2145 /*
2146 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2147 * Allows quiescent states for a group of CPUs to be reported at one go
2148 * to the specified rcu_node structure, though all the CPUs in the group
2149 * must be represented by the same rcu_node structure (which need not be a
2150 * leaf rcu_node structure, though it often will be). The gps parameter
2151 * is the grace-period snapshot, which means that the quiescent states
2152 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2153 * must be held upon entry, and it is released before return.
2154 *
2155 * As a special case, if mask is zero, the bit-already-cleared check is
2156 * disabled. This allows propagating quiescent state due to resumed tasks
2157 * during grace-period initialization.
2158 */
2159 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2160 unsigned long gps, unsigned long flags)
2161 __releases(rnp->lock)
2162 {
2163 unsigned long oldmask = 0;
2164 struct rcu_node *rnp_c;
2165
2166 raw_lockdep_assert_held_rcu_node(rnp);
2167
2168 /* Walk up the rcu_node hierarchy. */
2169 for (;;) {
2170 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2171
2172 /*
2173 * Our bit has already been cleared, or the
2174 * relevant grace period is already over, so done.
2175 */
2176 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2177 return;
2178 }
2179 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2180 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2181 rcu_preempt_blocked_readers_cgp(rnp));
2182 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2183 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2184 mask, rnp->qsmask, rnp->level,
2185 rnp->grplo, rnp->grphi,
2186 !!rnp->gp_tasks);
2187 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2188
2189 /* Other bits still set at this level, so done. */
2190 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2191 return;
2192 }
2193 rnp->completedqs = rnp->gp_seq;
2194 mask = rnp->grpmask;
2195 if (rnp->parent == NULL) {
2196
2197 /* No more levels. Exit loop holding root lock. */
2198
2199 break;
2200 }
2201 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2202 rnp_c = rnp;
2203 rnp = rnp->parent;
2204 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2205 oldmask = READ_ONCE(rnp_c->qsmask);
2206 }
2207
2208 /*
2209 * Get here if we are the last CPU to pass through a quiescent
2210 * state for this grace period. Invoke rcu_report_qs_rsp()
2211 * to clean up and start the next grace period if one is needed.
2212 */
2213 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2214 }
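/*
 * Editorial sketch: with a two-level tree in which CPUs 0-1 feed leaf A
 * (->grpmask 0x1 within the root) and CPUs 2-3 feed leaf B (->grpmask
 * 0x2), CPU 3 reporting leaf B's last outstanding quiescent state
 * clears its bit in leaf B's ->qsmask, finds that mask now zero, and
 * walks up to clear bit 0x2 in the root's ->qsmask. If leaf A's bit is
 * already clear, the loop above breaks out holding the root's lock and
 * rcu_report_qs_rsp() marks the grace period as ready to end.
 */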
2215
2216 /*
2217 * Record a quiescent state for all tasks that were previously queued
2218 * on the specified rcu_node structure and that were blocking the current
2219 * RCU grace period. The caller must hold the corresponding rnp->lock with
2220 * irqs disabled, and this lock is released upon return, but irqs remain
2221 * disabled.
2222 */
2223 static void __maybe_unused
2224 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2225 __releases(rnp->lock)
2226 {
2227 unsigned long gps;
2228 unsigned long mask;
2229 struct rcu_node *rnp_p;
2230
2231 raw_lockdep_assert_held_rcu_node(rnp);
2232 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2233 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2234 rnp->qsmask != 0) {
2235 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2236 return; /* Still need more quiescent states! */
2237 }
2238
2239 rnp->completedqs = rnp->gp_seq;
2240 rnp_p = rnp->parent;
2241 if (rnp_p == NULL) {
2242 /*
2243 * Only one rcu_node structure in the tree, so don't
2244 * try to report up to its nonexistent parent!
2245 */
2246 rcu_report_qs_rsp(flags);
2247 return;
2248 }
2249
2250 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2251 gps = rnp->gp_seq;
2252 mask = rnp->grpmask;
2253 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2254 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2255 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2256 }
2257
2258 /*
2259 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2260 * structure. This must be called from the specified CPU.
2261 */
2262 static void
2263 rcu_report_qs_rdp(struct rcu_data *rdp)
2264 {
2265 unsigned long flags;
2266 unsigned long mask;
2267 bool needwake = false;
2268 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2269 rcu_segcblist_is_offloaded(&rdp->cblist);
2270 struct rcu_node *rnp;
2271
2272 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2273 rnp = rdp->mynode;
2274 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2275 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2276 rdp->gpwrap) {
2277
2278 /*
2279 * The grace period in which this quiescent state was
2280 * recorded has ended, so don't report it upwards.
2281 * We will instead need a new quiescent state that lies
2282 * within the current grace period.
2283 */
2284 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2285 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2286 return;
2287 }
2288 mask = rdp->grpmask;
2289 rdp->core_needs_qs = false;
2290 if ((rnp->qsmask & mask) == 0) {
2291 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2292 } else {
2293 /*
2294 * This GP can't end until this CPU checks in, so all of our
2295 * callbacks can be processed during the next GP.
2296 */
2297 if (!offloaded)
2298 needwake = rcu_accelerate_cbs(rnp, rdp);
2299
2300 rcu_disable_urgency_upon_qs(rdp);
2301 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2302 /* ^^^ Released rnp->lock */
2303 if (needwake)
2304 rcu_gp_kthread_wake();
2305 }
2306 }
2307
2308 /*
2309 * Check to see if there is a new grace period of which this CPU
2310 * is not yet aware, and if so, set up local rcu_data state for it.
2311 * Otherwise, see if this CPU has just passed through its first
2312 * quiescent state for this grace period, and record that fact if so.
2313 */
2314 static void
2315 rcu_check_quiescent_state(struct rcu_data *rdp)
2316 {
2317 /* Check for grace-period ends and beginnings. */
2318 note_gp_changes(rdp);
2319
2320 /*
2321 * Does this CPU still need to do its part for current grace period?
2322 * If no, return and let the other CPUs do their part as well.
2323 */
2324 if (!rdp->core_needs_qs)
2325 return;
2326
2327 /*
2328 * Was there a quiescent state since the beginning of the grace
2329 * period? If no, then exit and wait for the next call.
2330 */
2331 if (rdp->cpu_no_qs.b.norm)
2332 return;
2333
2334 /*
2335 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2336 * judge of that).
2337 */
2338 rcu_report_qs_rdp(rdp);
2339 }
2340
2341 /*
2342 * Near the end of the offline process. Trace the fact that this CPU
2343 * is going offline.
2344 */
2345 int rcutree_dying_cpu(unsigned int cpu)
2346 {
2347 bool blkd;
2348 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2349 struct rcu_node *rnp = rdp->mynode;
2350
2351 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2352 return 0;
2353
2354 blkd = !!(rnp->qsmask & rdp->grpmask);
2355 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2356 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2357 return 0;
2358 }
2359
2360 /*
2361 * All CPUs for the specified rcu_node structure have gone offline,
2362 * and all tasks that were preempted within an RCU read-side critical
2363 * section while running on one of those CPUs have since exited their RCU
2364 * read-side critical section. Some other CPU is reporting this fact with
2365 * the specified rcu_node structure's ->lock held and interrupts disabled.
2366 * This function therefore goes up the tree of rcu_node structures,
2367 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2368 * the leaf rcu_node structure's ->qsmaskinit field has already been
2369 * updated.
2370 *
2371 * This function does check that the specified rcu_node structure has
2372 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2373 * prematurely. That said, invoking it after the fact will cost you
2374 * a needless lock acquisition. So once it has done its work, don't
2375 * invoke it again.
2376 */
2377 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2378 {
2379 long mask;
2380 struct rcu_node *rnp = rnp_leaf;
2381
2382 raw_lockdep_assert_held_rcu_node(rnp_leaf);
2383 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2384 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2385 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2386 return;
2387 for (;;) {
2388 mask = rnp->grpmask;
2389 rnp = rnp->parent;
2390 if (!rnp)
2391 break;
2392 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2393 rnp->qsmaskinit &= ~mask;
2394 /* Between grace periods, so better already be zero! */
2395 WARN_ON_ONCE(rnp->qsmask);
2396 if (rnp->qsmaskinit) {
2397 raw_spin_unlock_rcu_node(rnp);
2398 /* irqs remain disabled. */
2399 return;
2400 }
2401 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2402 }
2403 }
2404
2405 /*
2406 * The CPU has been completely removed, and some other CPU is reporting
2407 * this fact from process context. Do the remainder of the cleanup.
2408 * There can only be one CPU hotplug operation at a time, so no need for
2409 * explicit locking.
2410 */
2411 int rcutree_dead_cpu(unsigned int cpu)
2412 {
2413 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2414 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2415
2416 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2417 return 0;
2418
2419 /* Adjust any no-longer-needed kthreads. */
2420 rcu_boost_kthread_setaffinity(rnp, -1);
2421 /* Do any needed no-CB deferred wakeups from this CPU. */
2422 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2423
2424 // Stop-machine done, so allow nohz_full to disable tick.
2425 tick_dep_clear(TICK_DEP_BIT_RCU);
2426 return 0;
2427 }
2428
2429 /*
2430 * Invoke any RCU callbacks that have made it to the end of their grace
2431 * period. Throttle as specified by rdp->blimit.
2432 */
2433 static void rcu_do_batch(struct rcu_data *rdp)
2434 {
2435 int div;
2436 unsigned long flags;
2437 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2438 rcu_segcblist_is_offloaded(&rdp->cblist);
2439 struct rcu_head *rhp;
2440 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2441 long bl, count;
2442 long pending, tlimit = 0;
2443
2444 /* If no callbacks are ready, just return. */
2445 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2446 trace_rcu_batch_start(rcu_state.name,
2447 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2448 trace_rcu_batch_end(rcu_state.name, 0,
2449 !rcu_segcblist_empty(&rdp->cblist),
2450 need_resched(), is_idle_task(current),
2451 rcu_is_callbacks_kthread());
2452 return;
2453 }
2454
2455 /*
2456 * Extract the list of ready callbacks, disabling interrupts to prevent
2457 * races with call_rcu() from interrupt handlers. Leave the
2458 * callback counts, as rcu_barrier() needs to be conservative.
2459 */
2460 local_irq_save(flags);
2461 rcu_nocb_lock(rdp);
2462 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2463 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2464 div = READ_ONCE(rcu_divisor);
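/*
 * A negative rcu_divisor selects the default shift of 7 (so bl is at
 * least pending/128); oversized values are capped just below the bit
 * width of long so that the shift below stays in range.
 */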
2465 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2466 bl = max(rdp->blimit, pending >> div);
2467 if (in_serving_softirq() && unlikely(bl > 100)) {
2468 long rrn = READ_ONCE(rcu_resched_ns);
2469
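/* Clamp the callback-invocation time budget to between 1 ms and 1 s. */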
2470 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2471 tlimit = local_clock() + rrn;
2472 }
2473 trace_rcu_batch_start(rcu_state.name,
2474 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2475 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2476 if (offloaded)
2477 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2478 rcu_nocb_unlock_irqrestore(rdp, flags);
2479
2480 /* Invoke callbacks. */
2481 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2482 rhp = rcu_cblist_dequeue(&rcl);
2483 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2484 rcu_callback_t f;
2485
2486 debug_rcu_head_unqueue(rhp);
2487
2488 rcu_lock_acquire(&rcu_callback_map);
2489 trace_rcu_invoke_callback(rcu_state.name, rhp);
2490
2491 f = rhp->func;
2492 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2493 f(rhp);
2494
2495 rcu_lock_release(&rcu_callback_map);
2496
2497 /*
2498 * Stop only if limit reached and CPU has something to do.
2499 * Note: The rcl structure counts down from zero.
2500 */
2501 if (in_serving_softirq()) {
2502 if (-rcl.len >= bl && (need_resched() ||
2503 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2504 break;
2505
2506 /*
2507 * Make sure we don't spend too much time here and deprive other
2508 * softirq vectors of CPU cycles.
2509 */
2510 if (unlikely(tlimit)) {
2511 /* only call local_clock() every 32 callbacks */
2512 if (likely((-rcl.len & 31) || local_clock() < tlimit))
2513 continue;
2514 /* Exceeded the time limit, so leave. */
2515 break;
2516 }
2517 } else {
2518 local_bh_enable();
2519 lockdep_assert_irqs_enabled();
2520 cond_resched_tasks_rcu_qs();
2521 lockdep_assert_irqs_enabled();
2522 local_bh_disable();
2523 }
2524 }
2525
2526 local_irq_save(flags);
2527 rcu_nocb_lock(rdp);
2528 count = -rcl.len;
2529 rdp->n_cbs_invoked += count;
2530 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2531 is_idle_task(current), rcu_is_callbacks_kthread());
2532
2533 /* Update counts and requeue any remaining callbacks. */
2534 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2535 smp_mb(); /* List handling before counting for rcu_barrier(). */
2536 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2537
2538 /* Reinstate batch limit if we have worked down the excess. */
2539 count = rcu_segcblist_n_cbs(&rdp->cblist);
2540 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2541 rdp->blimit = blimit;
2542
2543 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2544 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2545 rdp->qlen_last_fqs_check = 0;
2546 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2547 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2548 rdp->qlen_last_fqs_check = count;
2549
2550 /*
2551 * The following usually indicates a double call_rcu(). To track
2552 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2553 */
2554 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2555 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2556 count != 0 && rcu_segcblist_empty(&rdp->cblist));
2557
2558 rcu_nocb_unlock_irqrestore(rdp, flags);
2559
2560 /* Re-invoke RCU core processing if there are callbacks remaining. */
2561 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2562 invoke_rcu_core();
2563 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2564 }
2565
2566 /*
2567 * This function is invoked from each scheduling-clock interrupt,
2568 * and checks to see if this CPU is in a non-context-switch quiescent
2569 * state, for example, user mode or idle loop. It also schedules RCU
2570 * core processing. If the current grace period has gone on too long,
2571 * it will ask the scheduler to manufacture a context switch for the sole
2572 * purpose of providing the needed quiescent state.
2573 */
2574 void rcu_sched_clock_irq(int user)
2575 {
2576 trace_rcu_utilization(TPS("Start scheduler-tick"));
2577 lockdep_assert_irqs_disabled();
2578 raw_cpu_inc(rcu_data.ticks_this_gp);
2579 /* The load-acquire pairs with the store-release setting to true. */
2580 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2581 /* Idle and userspace execution already are quiescent states. */
2582 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2583 set_tsk_need_resched(current);
2584 set_preempt_need_resched();
2585 }
2586 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2587 }
2588 rcu_flavor_sched_clock_irq(user);
2589 if (rcu_pending(user))
2590 invoke_rcu_core();
2591 lockdep_assert_irqs_disabled();
2592
2593 trace_rcu_utilization(TPS("End scheduler-tick"));
2594 }
2595
2596 /*
2597 * Scan the leaf rcu_node structures. For each structure on which all
2598 * CPUs have reported a quiescent state and on which there are tasks
2599 * blocking the current grace period, initiate RCU priority boosting.
2600 * Otherwise, invoke the specified function to check dyntick state for
2601 * each CPU that has not yet reported a quiescent state.
2602 */
2603 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2604 {
2605 int cpu;
2606 unsigned long flags;
2607 unsigned long mask;
2608 struct rcu_data *rdp;
2609 struct rcu_node *rnp;
2610
2611 rcu_state.cbovld = rcu_state.cbovldnext;
2612 rcu_state.cbovldnext = false;
2613 rcu_for_each_leaf_node(rnp) {
2614 cond_resched_tasks_rcu_qs();
2615 mask = 0;
2616 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2617 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2618 if (rnp->qsmask == 0) {
2619 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2620 /*
2621 * No point in scanning bits because they
2622 * are all zero. But we might need to
2623 * priority-boost blocked readers.
2624 */
2625 rcu_initiate_boost(rnp, flags);
2626 /* rcu_initiate_boost() releases rnp->lock */
2627 continue;
2628 }
2629 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2630 continue;
2631 }
2632 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2633 rdp = per_cpu_ptr(&rcu_data, cpu);
2634 if (f(rdp)) {
2635 mask |= rdp->grpmask;
2636 rcu_disable_urgency_upon_qs(rdp);
2637 }
2638 }
2639 if (mask != 0) {
2640 /* Idle/offline CPUs, report (releases rnp->lock). */
2641 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2642 } else {
2643 /* Nothing to do here, so just drop the lock. */
2644 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2645 }
2646 }
2647 }
2648
2649 /*
2650 * Force quiescent states on reluctant CPUs, and also detect which
2651 * CPUs are in dyntick-idle mode.
2652 */
2653 void rcu_force_quiescent_state(void)
2654 {
2655 unsigned long flags;
2656 bool ret;
2657 struct rcu_node *rnp;
2658 struct rcu_node *rnp_old = NULL;
2659
2660 /* Funnel through hierarchy to reduce memory contention. */
2661 rnp = raw_cpu_read(rcu_data.mynode);
2662 for (; rnp != NULL; rnp = rnp->parent) {
2663 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2664 !raw_spin_trylock(&rnp->fqslock);
2665 if (rnp_old != NULL)
2666 raw_spin_unlock(&rnp_old->fqslock);
2667 if (ret)
2668 return;
2669 rnp_old = rnp;
2670 }
2671 /* rnp_old == rcu_get_root(), rnp == NULL. */
2672
2673 /* Reached the root of the rcu_node tree, acquire lock. */
2674 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2675 raw_spin_unlock(&rnp_old->fqslock);
2676 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2677 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2678 return; /* Someone beat us to it. */
2679 }
2680 WRITE_ONCE(rcu_state.gp_flags,
2681 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2682 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2683 rcu_gp_kthread_wake();
2684 }
2685 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2686
2687 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2688 // grace periods.
2689 static void strict_work_handler(struct work_struct *work)
2690 {
2691 rcu_read_lock();
2692 rcu_read_unlock();
2693 }
2694
2695 /* Perform RCU core processing work for the current CPU. */
2696 static __latent_entropy void rcu_core(void)
2697 {
2698 unsigned long flags;
2699 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2700 struct rcu_node *rnp = rdp->mynode;
2701 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2702 rcu_segcblist_is_offloaded(&rdp->cblist);
2703
2704 if (cpu_is_offline(smp_processor_id()))
2705 return;
2706 trace_rcu_utilization(TPS("Start RCU core"));
2707 WARN_ON_ONCE(!rdp->beenonline);
2708
2709 /* Report any deferred quiescent states if preemption enabled. */
2710 if (!(preempt_count() & PREEMPT_MASK)) {
2711 rcu_preempt_deferred_qs(current);
2712 } else if (rcu_preempt_need_deferred_qs(current)) {
2713 set_tsk_need_resched(current);
2714 set_preempt_need_resched();
2715 }
2716
2717 /* Update RCU state based on any recent quiescent states. */
2718 rcu_check_quiescent_state(rdp);
2719
2720 /* No grace period and unregistered callbacks? */
2721 if (!rcu_gp_in_progress() &&
2722 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2723 local_irq_save(flags);
2724 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2725 rcu_accelerate_cbs_unlocked(rnp, rdp);
2726 local_irq_restore(flags);
2727 }
2728
2729 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2730
2731 /* If there are callbacks ready, invoke them. */
2732 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2733 likely(READ_ONCE(rcu_scheduler_fully_active)))
2734 rcu_do_batch(rdp);
2735
2736 /* Do any needed deferred wakeups of rcuo kthreads. */
2737 do_nocb_deferred_wakeup(rdp);
2738 trace_rcu_utilization(TPS("End RCU core"));
2739
2740 // If strict GPs, schedule an RCU reader in a clean environment.
2741 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2742 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2743 }
2744
2745 static void rcu_core_si(struct softirq_action *h)
2746 {
2747 rcu_core();
2748 }
2749
2750 static void rcu_wake_cond(struct task_struct *t, int status)
2751 {
2752 /*
2753 * If the thread is yielding, only wake it when this
2754 * is invoked from idle
2755 */
2756 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2757 wake_up_process(t);
2758 }
2759
2760 static void invoke_rcu_core_kthread(void)
2761 {
2762 struct task_struct *t;
2763 unsigned long flags;
2764
2765 local_irq_save(flags);
2766 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2767 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2768 if (t != NULL && t != current)
2769 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2770 local_irq_restore(flags);
2771 }
2772
2773 /*
2774 * Wake up this CPU's rcuc kthread to do RCU core processing.
2775 */
2776 static void invoke_rcu_core(void)
2777 {
2778 if (!cpu_online(smp_processor_id()))
2779 return;
2780 if (use_softirq)
2781 raise_softirq(RCU_SOFTIRQ);
2782 else
2783 invoke_rcu_core_kthread();
2784 }
2785
2786 static void rcu_cpu_kthread_park(unsigned int cpu)
2787 {
2788 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2789 }
2790
2791 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2792 {
2793 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2794 }
2795
2796 /*
2797 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2798 * the RCU softirq used in configurations of RCU that do not support RCU
2799 * priority boosting.
2800 */
2801 static void rcu_cpu_kthread(unsigned int cpu)
2802 {
2803 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2804 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2805 int spincnt;
2806
2807 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2808 for (spincnt = 0; spincnt < 10; spincnt++) {
2809 local_bh_disable();
2810 *statusp = RCU_KTHREAD_RUNNING;
2811 local_irq_disable();
2812 work = *workp;
2813 *workp = 0;
2814 local_irq_enable();
2815 if (work)
2816 rcu_core();
2817 local_bh_enable();
2818 if (*workp == 0) {
2819 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2820 *statusp = RCU_KTHREAD_WAITING;
2821 return;
2822 }
2823 }
2824 *statusp = RCU_KTHREAD_YIELDING;
2825 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2826 schedule_timeout_idle(2);
2827 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2828 *statusp = RCU_KTHREAD_WAITING;
2829 }
2830
2831 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2832 .store = &rcu_data.rcu_cpu_kthread_task,
2833 .thread_should_run = rcu_cpu_kthread_should_run,
2834 .thread_fn = rcu_cpu_kthread,
2835 .thread_comm = "rcuc/%u",
2836 .setup = rcu_cpu_kthread_setup,
2837 .park = rcu_cpu_kthread_park,
2838 };
2839
2840 /*
2841 * Spawn per-CPU RCU core processing kthreads.
2842 */
2843 static int __init rcu_spawn_core_kthreads(void)
2844 {
2845 int cpu;
2846
2847 for_each_possible_cpu(cpu)
2848 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2849 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2850 return 0;
2851 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2852 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2853 return 0;
2854 }
2855
2856 /*
2857 * Handle any core-RCU processing required by a call_rcu() invocation.
2858 */
2859 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2860 unsigned long flags)
2861 {
2862 /*
2863 * If called from an extended quiescent state, invoke the RCU
2864 * core in order to force a re-evaluation of RCU's idleness.
2865 */
2866 if (!rcu_is_watching())
2867 invoke_rcu_core();
2868
2869 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2870 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2871 return;
2872
2873 /*
2874 * Force the grace period if too many callbacks or too long waiting.
2875 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2876 * if some other CPU has recently done so. Also, don't bother
2877 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2878 * is the only one waiting for a grace period to complete.
2879 */
2880 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2881 rdp->qlen_last_fqs_check + qhimark)) {
2882
2883 /* Are we ignoring a completed grace period? */
2884 note_gp_changes(rdp);
2885
2886 /* Start a new grace period if one not already started. */
2887 if (!rcu_gp_in_progress()) {
2888 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2889 } else {
2890 /* Give the grace period a kick. */
2891 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2892 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2893 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2894 rcu_force_quiescent_state();
2895 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2896 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2897 }
2898 }
2899 }
2900
2901 /*
2902 * RCU callback function to leak a callback.
2903 */
2904 static void rcu_leak_callback(struct rcu_head *rhp)
2905 {
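/* Intentionally empty: the double-queued callback is deliberately leaked rather than risking a double free. */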
2906 }
2907
2908 /*
2909 * Check and if necessary update the leaf rcu_node structure's
2910 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2911 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2912 * structure's ->lock.
2913 */
2914 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2915 {
2916 raw_lockdep_assert_held_rcu_node(rnp);
2917 if (qovld_calc <= 0)
2918 return; // Early boot and wildcard value set.
2919 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2920 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2921 else
2922 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2923 }
2924
2925 /*
2926 * Check and if necessary update the leaf rcu_node structure's
2927 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2928 * number of queued RCU callbacks. No locks need be held, but the
2929 * caller must have disabled interrupts.
2930 *
2931 * Note that this function ignores the possibility that there are a lot
2932 * of callbacks all of which have already seen the end of their respective
2933 * grace periods. This omission is due to the need for no-CBs CPUs to
2934 * be holding ->nocb_lock to do this check, which is too heavy for a
2935 * common-case operation.
2936 */
2937 static void check_cb_ovld(struct rcu_data *rdp)
2938 {
2939 struct rcu_node *const rnp = rdp->mynode;
2940
2941 if (qovld_calc <= 0 ||
2942 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2943 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2944 return; // Early boot wildcard value or already set correctly.
2945 raw_spin_lock_rcu_node(rnp);
2946 check_cb_ovld_locked(rdp, rnp);
2947 raw_spin_unlock_rcu_node(rnp);
2948 }
2949
2950 /* Helper function for call_rcu() and friends. */
2951 static void
2952 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2953 {
2954 unsigned long flags;
2955 struct rcu_data *rdp;
2956 bool was_alldone;
2957
2958 /* Misaligned rcu_head! */
2959 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2960
2961 if (debug_rcu_head_queue(head)) {
2962 /*
2963 * Probable double call_rcu(), so leak the callback.
2964 * Use rcu:rcu_callback trace event to find the previous
2965 * time callback was passed to __call_rcu().
2966 */
2967 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2968 head, head->func);
2969 WRITE_ONCE(head->func, rcu_leak_callback);
2970 return;
2971 }
2972 head->func = func;
2973 head->next = NULL;
2974 local_irq_save(flags);
2975 kasan_record_aux_stack(head);
2976 rdp = this_cpu_ptr(&rcu_data);
2977
2978 /* Add the callback to our list. */
2979 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2980 // This can trigger due to call_rcu() from offline CPU:
2981 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2982 WARN_ON_ONCE(!rcu_is_watching());
2983 // Very early boot, before rcu_init(). Initialize if needed
2984 // and then drop through to queue the callback.
2985 if (rcu_segcblist_empty(&rdp->cblist))
2986 rcu_segcblist_init(&rdp->cblist);
2987 }
2988
2989 check_cb_ovld(rdp);
2990 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2991 return; // Enqueued onto ->nocb_bypass, so just leave.
2992 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2993 rcu_segcblist_enqueue(&rdp->cblist, head);
2994 if (__is_kvfree_rcu_offset((unsigned long)func))
2995 trace_rcu_kvfree_callback(rcu_state.name, head,
2996 (unsigned long)func,
2997 rcu_segcblist_n_cbs(&rdp->cblist));
2998 else
2999 trace_rcu_callback(rcu_state.name, head,
3000 rcu_segcblist_n_cbs(&rdp->cblist));
3001
3002 /* Go handle any RCU core processing required. */
3003 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
3004 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
3005 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3006 } else {
3007 __call_rcu_core(rdp, head, flags);
3008 local_irq_restore(flags);
3009 }
3010 }
3011
3012 /**
3013 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3014 * @head: structure to be used for queueing the RCU updates.
3015 * @func: actual callback function to be invoked after the grace period
3016 *
3017 * The callback function will be invoked some time after a full grace
3018 * period elapses, in other words after all pre-existing RCU read-side
3019 * critical sections have completed. However, the callback function
3020 * might well execute concurrently with RCU read-side critical sections
3021 * that started after call_rcu() was invoked. RCU read-side critical
3022 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3023 * may be nested. In addition, regions of code across which interrupts,
3024 * preemption, or softirqs have been disabled also serve as RCU read-side
3025 * critical sections. This includes hardware interrupt handlers, softirq
3026 * handlers, and NMI handlers.
3027 *
3028 * Note that all CPUs must agree that the grace period extended beyond
3029 * all pre-existing RCU read-side critical sections. On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu(). It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section. Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);
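
/*
 * Illustrative sketch (not part of this file): the typical call_rcu()
 * usage pattern. The struct foo, foo_reclaim(), and fp below are
 * hypothetical names, shown only to illustrate how a callback recovers
 * its enclosing structure via container_of():
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		kfree(container_of(rp, struct foo, rcu));
 *	}
 *
 *	// Updater: unlink under the update-side lock, then defer the free.
 *	list_del_rcu(&fp->list);
 *	call_rcu(&fp->rcu, foo_reclaim);
 */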

/* Maximum number of jiffies to wait before draining a batch. */
#define KFREE_DRAIN_JIFFIES (HZ / 50)
#define KFREE_N_BATCHES 2
#define FREE_N_CHANNELS 2
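
/*
 * Worked example: with HZ=1000, KFREE_DRAIN_JIFFIES is 1000/50 = 20
 * jiffies, that is, a 20-millisecond drain interval. Because the
 * divisor scales with HZ, the interval stays at roughly 20 ms for any
 * HZ setting.
 */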

/**
 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
 * @nr_records: Number of active pointers in the array
 * @next: Next bulk object in the block chain
 * @records: Array of the kvfree_rcu() pointers
 */
struct kvfree_rcu_bulk_data {
	unsigned long nr_records;
	struct kvfree_rcu_bulk_data *next;
	void *records[];
};

/*
 * This macro defines how many entries the "records" array
 * will contain. It is chosen so that the size of a
 * kvfree_rcu_bulk_data structure becomes exactly one page.
 */
#define KVFREE_BULK_MAX_ENTR \
	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
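
/*
 * Worked example: on a system with 4096-byte pages and 64-bit pointers,
 * sizeof(struct kvfree_rcu_bulk_data) is 16 bytes (one unsigned long
 * plus one pointer), so KVFREE_BULK_MAX_ENTR evaluates to
 * (4096 - 16) / 8 = 510 pointers per page-sized block.
 */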

/**
 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
 * @head_free: List of kfree_rcu() objects waiting for a grace period
 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
 * @krcp: Pointer to @kfree_rcu_cpu structure
 */
struct kfree_rcu_cpu_work {
	struct rcu_work rcu_work;
	struct rcu_head *head_free;
	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
	struct kfree_rcu_cpu *krcp;
};

/**
 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
 * @head: List of kfree_rcu() objects not yet waiting for a grace period
 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
 * @lock: Synchronize access to this structure
 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
 * @initialized: The @rcu_work fields have been initialized
 * @count: Number of objects for which GP not started
 * @bkvcache:
 *	A simple cache list that contains objects for reuse purposes.
 *	To save some per-CPU space there is only a single list. Even
 *	though it is lockless, access to it must be protected by the
 *	per-CPU lock.
 * @page_cache_work: A work to refill the cache when it is empty
 * @work_in_progress: Indicates that page_cache_work is running
 * @hrtimer: A hrtimer for scheduling a page_cache_work
 * @nr_bkv_objs: number of allocated objects at @bkvcache.
 *
 * This is a per-CPU structure. The reason that it is not included in
 * the rcu_data structure is to permit this code to be extracted from
 * the RCU files. Such extraction could allow further optimization of
 * the interactions with the slab allocators.
 */
struct kfree_rcu_cpu {
	struct rcu_head *head;
	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
	raw_spinlock_t lock;
	struct delayed_work monitor_work;
	bool monitor_todo;
	bool initialized;
	int count;

	struct work_struct page_cache_work;
	atomic_t work_in_progress;
	struct hrtimer hrtimer;

	struct llist_head bkvcache;
	int nr_bkv_objs;
};

static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
};

static __always_inline void
debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	int i;

	for (i = 0; i < bhead->nr_records; i++)
		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
#endif
}

static inline struct kfree_rcu_cpu *
krc_this_cpu_lock(unsigned long *flags)
{
	struct kfree_rcu_cpu *krcp;

	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
	krcp = this_cpu_ptr(&krc);
	raw_spin_lock(&krcp->lock);

	return krcp;
}

static inline void
krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
{
	raw_spin_unlock(&krcp->lock);
	local_irq_restore(flags);
}

static inline struct kvfree_rcu_bulk_data *
get_cached_bnode(struct kfree_rcu_cpu *krcp)
{
	if (!krcp->nr_bkv_objs)
		return NULL;

	krcp->nr_bkv_objs--;
	return (struct kvfree_rcu_bulk_data *)
		llist_del_first(&krcp->bkvcache);
}

static inline bool
put_cached_bnode(struct kfree_rcu_cpu *krcp,
	struct kvfree_rcu_bulk_data *bnode)
{
	// Check the limit.
	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
		return false;

	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
	krcp->nr_bkv_objs++;
	return true;
}

/*
 * This function is invoked in workqueue context after a grace period.
 * It frees all the objects queued on ->bkvhead_free or ->head_free.
 */
static void kfree_rcu_work(struct work_struct *work)
{
	unsigned long flags;
	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
	struct rcu_head *head, *next;
	struct kfree_rcu_cpu *krcp;
	struct kfree_rcu_cpu_work *krwp;
	int i, j;

	krwp = container_of(to_rcu_work(work),
			    struct kfree_rcu_cpu_work, rcu_work);
	krcp = krwp->krcp;

	raw_spin_lock_irqsave(&krcp->lock, flags);
	// Channels 1 and 2.
	for (i = 0; i < FREE_N_CHANNELS; i++) {
		bkvhead[i] = krwp->bkvhead_free[i];
		krwp->bkvhead_free[i] = NULL;
	}

	// Channel 3.
	head = krwp->head_free;
	krwp->head_free = NULL;
	raw_spin_unlock_irqrestore(&krcp->lock, flags);

	// Handle the first two channels.
	for (i = 0; i < FREE_N_CHANNELS; i++) {
		for (; bkvhead[i]; bkvhead[i] = bnext) {
			bnext = bkvhead[i]->next;
			debug_rcu_bhead_unqueue(bkvhead[i]);

			rcu_lock_acquire(&rcu_callback_map);
			if (i == 0) { // kmalloc() / kfree().
				trace_rcu_invoke_kfree_bulk_callback(
					rcu_state.name, bkvhead[i]->nr_records,
					bkvhead[i]->records);

				kfree_bulk(bkvhead[i]->nr_records,
					bkvhead[i]->records);
			} else { // vmalloc() / vfree().
				for (j = 0; j < bkvhead[i]->nr_records; j++) {
					trace_rcu_invoke_kvfree_callback(
						rcu_state.name,
						bkvhead[i]->records[j], 0);

					vfree(bkvhead[i]->records[j]);
				}
			}
			rcu_lock_release(&rcu_callback_map);

			raw_spin_lock_irqsave(&krcp->lock, flags);
			if (put_cached_bnode(krcp, bkvhead[i]))
				bkvhead[i] = NULL;
			raw_spin_unlock_irqrestore(&krcp->lock, flags);

			if (bkvhead[i])
				free_page((unsigned long) bkvhead[i]);

			cond_resched_tasks_rcu_qs();
		}
	}

	/*
	 * This is used only in the emergency case: under low-memory
	 * conditions an allocation can fail, in which case the "bulk"
	 * path cannot be maintained for the duration.
	 */
	for (; head; head = next) {
		unsigned long offset = (unsigned long)head->func;
		void *ptr = (void *)head - offset;

		next = head->next;
		debug_rcu_head_unqueue((struct rcu_head *)ptr);
		rcu_lock_acquire(&rcu_callback_map);
		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);

		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
			kvfree(ptr);

		rcu_lock_release(&rcu_callback_map);
		cond_resched_tasks_rcu_qs();
	}
}

/*
 * Schedule the kfree batch RCU work to run in workqueue context after a GP.
 *
 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
 * timeout has been reached.
 */
static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
{
	struct kfree_rcu_cpu_work *krwp;
	bool repeat = false;
	int i, j;

	lockdep_assert_held(&krcp->lock);

	for (i = 0; i < KFREE_N_BATCHES; i++) {
		krwp = &(krcp->krw_arr[i]);

		/*
		 * Try to detach bkvhead or head and attach it to the
		 * corresponding free channel, if that channel is available.
		 * A previous RCU batch may still be in progress, in which
		 * case another one cannot be queued immediately, so return
		 * false to tell the caller to retry.
		 */
		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
				(krcp->head && !krwp->head_free)) {
			// Channel 1 corresponds to SLAB ptrs.
			// Channel 2 corresponds to vmalloc ptrs.
			for (j = 0; j < FREE_N_CHANNELS; j++) {
				if (!krwp->bkvhead_free[j]) {
					krwp->bkvhead_free[j] = krcp->bkvhead[j];
					krcp->bkvhead[j] = NULL;
				}
			}

			// Channel 3 corresponds to emergency path.
			if (!krwp->head_free) {
				krwp->head_free = krcp->head;
				krcp->head = NULL;
			}

			WRITE_ONCE(krcp->count, 0);

			/*
			 * There is one work per batch, so each batch can
			 * handle all three "free channels". The work may
			 * already be pending when the channels are detached
			 * one after another.
			 */
			queue_rcu_work(system_wq, &krwp->rcu_work);
		}

		// Repeat if any corresponding "free" channel is still busy.
		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
			repeat = true;
	}

	return !repeat;
}

static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
					  unsigned long flags)
{
	// Attempt to start a new batch.
	krcp->monitor_todo = false;
	if (queue_kfree_rcu_work(krcp)) {
		// Success! Our job is done here.
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
		return;
	}

	// Previous RCU batch still in progress, try again later.
	krcp->monitor_todo = true;
	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
	raw_spin_unlock_irqrestore(&krcp->lock, flags);
}

/*
 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
 */
static void kfree_rcu_monitor(struct work_struct *work)
{
	unsigned long flags;
	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
						  monitor_work.work);

	raw_spin_lock_irqsave(&krcp->lock, flags);
	if (krcp->monitor_todo)
		kfree_rcu_drain_unlock(krcp, flags);
	else
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
}

static enum hrtimer_restart
schedule_page_work_fn(struct hrtimer *t)
{
	struct kfree_rcu_cpu *krcp =
		container_of(t, struct kfree_rcu_cpu, hrtimer);

	queue_work(system_highpri_wq, &krcp->page_cache_work);
	return HRTIMER_NORESTART;
}

static void fill_page_cache_func(struct work_struct *work)
{
	struct kvfree_rcu_bulk_data *bnode;
	struct kfree_rcu_cpu *krcp =
		container_of(work, struct kfree_rcu_cpu,
			page_cache_work);
	unsigned long flags;
	bool pushed;
	int i;

	for (i = 0; i < rcu_min_cached_objs; i++) {
		bnode = (struct kvfree_rcu_bulk_data *)
			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

		if (!bnode)
			break;

		raw_spin_lock_irqsave(&krcp->lock, flags);
		pushed = put_cached_bnode(krcp, bnode);
		raw_spin_unlock_irqrestore(&krcp->lock, flags);

		if (!pushed) {
			free_page((unsigned long) bnode);
			break;
		}
	}

	atomic_set(&krcp->work_in_progress, 0);
}

static void
run_page_cache_worker(struct kfree_rcu_cpu *krcp)
{
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
			!atomic_xchg(&krcp->work_in_progress, 1)) {
		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
		krcp->hrtimer.function = schedule_page_work_fn;
		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
	}
}

static inline bool
kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
{
	struct kvfree_rcu_bulk_data *bnode;
	int idx;

	if (unlikely(!krcp->initialized))
		return false;

	lockdep_assert_held(&krcp->lock);
	idx = !!is_vmalloc_addr(ptr);

	/* Check if a new block is required. */
	if (!krcp->bkvhead[idx] ||
			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
		bnode = get_cached_bnode(krcp);
		/* Switch to emergency path. */
		if (!bnode)
			return false;

		/* Initialize the new block. */
		bnode->nr_records = 0;
		bnode->next = krcp->bkvhead[idx];

		/* Attach it to the head. */
		krcp->bkvhead[idx] = bnode;
	}

	/* Finally insert. */
	krcp->bkvhead[idx]->records
		[krcp->bkvhead[idx]->nr_records++] = ptr;

	return true;
}

/*
 * Queue a request for lazy invocation of the appropriate free routine
 * after a grace period. Please note that three paths are maintained,
 * two of which are the main ones that use an array-of-pointers interface,
 * and a third emergency path that is used only when the main paths
 * cannot be maintained temporarily, due to memory pressure.
 *
 * Each kvfree_call_rcu() request is added to a batch. The batch will be
 * drained every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in
 * the batch will be freed in workqueue context. This allows us to batch
 * requests together to reduce the number of grace periods during heavy
 * kfree_rcu()/kvfree_rcu() load.
 */
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;
	struct kfree_rcu_cpu *krcp;
	bool success;
	void *ptr;

	if (head) {
		ptr = (void *) head - (unsigned long) func;
	} else {
		/*
		 * Please note that there is a limitation for the head-less
		 * variant, which is why there is a clear rule for such
		 * objects: they can be used from might_sleep() context
		 * only. For other places please embed an rcu_head into
		 * your data.
		 */
		might_sleep();
		ptr = (unsigned long *) func;
	}

	krcp = krc_this_cpu_lock(&flags);

	// Queue the object but don't yet schedule the batch.
	if (debug_rcu_head_queue(ptr)) {
		// Probable double kfree_rcu(), just leak.
		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
			  __func__, head);

		// Mark as success and leave.
		success = true;
		goto unlock_return;
	}

	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
	if (!success) {
		run_page_cache_worker(krcp);

		if (head == NULL)
			// Inline if kvfree_rcu(one_arg) call.
			goto unlock_return;

		head->func = func;
		head->next = krcp->head;
		krcp->head = head;
		success = true;
	}

	WRITE_ONCE(krcp->count, krcp->count + 1);

	// Set timer to drain after KFREE_DRAIN_JIFFIES.
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
	    !krcp->monitor_todo) {
		krcp->monitor_todo = true;
		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
	}

unlock_return:
	krc_this_cpu_unlock(krcp, flags);

	/*
	 * Inline kvfree() after synchronize_rcu(). We can do
	 * it from might_sleep() context only, so the current
	 * CPU can pass through a quiescent state.
	 */
	if (!success) {
		debug_rcu_head_unqueue((struct rcu_head *) ptr);
		synchronize_rcu();
		kvfree(ptr);
	}
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
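
/*
 * Illustrative sketch (not part of this file): the two kvfree_rcu()
 * forms that funnel into kvfree_call_rcu(). The struct foo, its rcu
 * field, and the fp/ptr variables are hypothetical:
 *
 *	// Two-argument form: uses the embedded rcu_head, never sleeps.
 *	kvfree_rcu(fp, rcu);
 *
 *	// Single-argument (head-less) form: no rcu_head is needed, but
 *	// it may sleep, so it is legal only from might_sleep() context.
 *	kvfree_rcu(ptr);
 */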

static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long count = 0;

	/* Snapshot count of all CPUs */
	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		count += READ_ONCE(krcp->count);
	}

	return count;
}

static unsigned long
kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu, freed = 0;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		int count;
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		count = krcp->count;
		raw_spin_lock_irqsave(&krcp->lock, flags);
		if (krcp->monitor_todo)
			kfree_rcu_drain_unlock(krcp, flags);
		else
			raw_spin_unlock_irqrestore(&krcp->lock, flags);

		sc->nr_to_scan -= count;
		freed += count;

		if (sc->nr_to_scan <= 0)
			break;
	}

	return freed == 0 ? SHRINK_STOP : freed;
}

static struct shrinker kfree_rcu_shrinker = {
	.count_objects = kfree_rcu_shrink_count,
	.scan_objects = kfree_rcu_shrink_scan,
	.batch = 0,
	.seeks = DEFAULT_SEEKS,
};

void __init kfree_rcu_scheduler_running(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		raw_spin_lock_irqsave(&krcp->lock, flags);
		if (!krcp->head || krcp->monitor_todo) {
			raw_spin_unlock_irqrestore(&krcp->lock, flags);
			continue;
		}
		krcp->monitor_todo = true;
		schedule_delayed_work_on(cpu, &krcp->monitor_work,
					 KFREE_DRAIN_JIFFIES);
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
	}
}

/*
 * During early boot, any blocking grace-period wait automatically
 * implies a grace period. Later on, this is never the case for PREEMPTION.
 *
 * However, because a context switch is a grace period for !PREEMPTION, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during execution of
 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds some
 * overhead: RCU still operates correctly.
 */
static int rcu_blocking_is_gp(void)
{
	int ret;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
	might_sleep();	/* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting. RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 * In addition, regions of code across which interrupts, preemption, or
 * softirqs have been disabled also serve as RCU read-side critical
 * sections. This includes hardware interrupt handlers, softirq handlers,
 * and NMI handlers.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last RCU read-side critical section whose beginning
 * preceded the call to synchronize_rcu(). In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_rcu() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_rcu() and before the beginning of
 * that RCU read-side critical section. Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
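
/*
 * Illustrative sketch (not part of this file): the classic
 * synchronize_rcu()-based update. The foo_lock, list, and fp below are
 * hypothetical stand-ins for the caller's own data structures:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&fp->list);  // Unlink; readers may still see it.
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();	  // Wait for pre-existing readers.
 *	kfree(fp);		  // No reader can now hold a reference.
 */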

/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gp_seq.
	 */
	smp_mb();  /* ^^^ */
	return rcu_seq_snap(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return. Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account. But
 * counter wrap is harmless. If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
		synchronize_rcu();
	else
		smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
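
/*
 * Illustrative sketch (not part of this file): using the grace-period
 * cookie to overlap a grace period with other work, where
 * do_other_work() is a hypothetical placeholder:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_other_work();		// Grace periods may elapse here.
 *	cond_synchronize_rcu(cookie);	// Blocks only if none has.
 */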

/*
 * Check to see if there is any immediate RCU-related work to be done by
 * the current CPU, returning 1 if so and zero otherwise. The checks are
 * in order of increasing expense: checks that can be carried out against
 * CPU-local state are performed first. However, we must check for CPU
 * stalls first, else we might not get a chance.
 */
static int rcu_pending(int user)
{
	bool gp_in_progress;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	lockdep_assert_irqs_disabled();

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rdp);

	/* Does this CPU need a deferred NOCB wakeup? */
	if (rcu_nocb_need_deferred_wakeup(rdp))
		return 1;

	/* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
		return 0;

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	gp_in_progress = rcu_gp_in_progress();
	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
		return 1;

	/* Does this CPU have callbacks ready to invoke? */
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		return 1;

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
	    (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
	     !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
		return 1;

	/* Have RCU grace period completed or started? */
	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Helper function for rcu_barrier() tracing. If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
{
	trace_rcu_barrier(rcu_state.name, s, cpu,
			  atomic_read(&rcu_state.barrier_cpu_count), done);
}

/*
 * RCU callback function for rcu_barrier(). If we are last, wake
 * up the task executing rcu_barrier().
 *
 * Note that the value of rcu_state.barrier_sequence must be captured
 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
 * other CPUs might count the value down to zero before this CPU gets
 * around to invoking rcu_barrier_trace(), which might result in bogus
 * data from the next instance of rcu_barrier().
 */
static void rcu_barrier_callback(struct rcu_head *rhp)
{
	unsigned long __maybe_unused s = rcu_state.barrier_sequence;

	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
		rcu_barrier_trace(TPS("LastCB"), -1, s);
		complete(&rcu_state.barrier_completion);
	} else {
		rcu_barrier_trace(TPS("CB"), -1, s);
	}
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *cpu_in)
{
	uintptr_t cpu = (uintptr_t)cpu_in;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
	rdp->barrier_head.func = rcu_barrier_callback;
	debug_rcu_head_queue(&rdp->barrier_head);
	rcu_nocb_lock(rdp);
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
		atomic_inc(&rcu_state.barrier_cpu_count);
	} else {
		debug_rcu_head_unqueue(&rdp->barrier_head);
		rcu_barrier_trace(TPS("IRQNQ"), -1,
				  rcu_state.barrier_sequence);
	}
	rcu_nocb_unlock(rdp);
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete. For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	uintptr_t cpu;
	struct rcu_data *rdp;
	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);

	rcu_barrier_trace(TPS("Begin"), -1, s);

	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rcu_state.barrier_mutex);

	/* Did someone else do our work for us? */
	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
		rcu_barrier_trace(TPS("EarlyExit"), -1,
				  rcu_state.barrier_sequence);
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rcu_state.barrier_mutex);
		return;
	}

	/* Mark the start of the barrier operation. */
	rcu_seq_start(&rcu_state.barrier_sequence);
	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);

	/*
	 * Initialize the count to two rather than to zero in order
	 * to avoid a too-soon return to zero in case of an immediate
	 * invocation of the just-enqueued callback (or preemption of
	 * this task). Exclude CPU-hotplug operations to ensure that no
	 * offline non-offloaded CPU has callbacks queued.
	 */
	init_completion(&rcu_state.barrier_completion);
	atomic_set(&rcu_state.barrier_cpu_count, 2);
	get_online_cpus();

	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (cpu_is_offline(cpu) &&
		    !rcu_segcblist_is_offloaded(&rdp->cblist))
			continue;
		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
			rcu_barrier_trace(TPS("OnlineQ"), cpu,
					  rcu_state.barrier_sequence);
			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
			   cpu_is_offline(cpu)) {
			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
					  rcu_state.barrier_sequence);
			local_irq_disable();
			rcu_barrier_func((void *)cpu);
			local_irq_enable();
		} else if (cpu_is_offline(cpu)) {
			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
					  rcu_state.barrier_sequence);
		} else {
			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
					  rcu_state.barrier_sequence);
		}
	}
	put_online_cpus();

	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
		complete(&rcu_state.barrier_completion);

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
	wait_for_completion(&rcu_state.barrier_completion);

	/* Mark the end of the barrier operation. */
	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
	rcu_seq_end(&rcu_state.barrier_sequence);

	/* Other rcu_barrier() invocations can now safely proceed. */
	mutex_unlock(&rcu_state.barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
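
/*
 * Illustrative sketch (not part of this file): the canonical use of
 * rcu_barrier() on a module-unload path. The foo_*() functions are
 * hypothetical; the point is that callbacks posted by the module must
 * actually be invoked (not merely have their grace periods elapse)
 * before the callback function's text can safely go away:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();
 *		rcu_barrier();	// Wait for outstanding callbacks to run.
 *		foo_cleanup();
 *	}
 */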

/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online. The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	long oldmask;
	struct rcu_node *rnp = rnp_leaf;

	raw_lockdep_assert_held_rcu_node(rnp_leaf);
	WARN_ON_ONCE(rnp->wait_blkd_tasks);
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit |= mask;
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
		if (oldmask)
			return;
	}
}

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	/* Set up local state, ensuring consistent view of global state. */
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
	INIT_WORK(&rdp->strict_work, strict_work_handler);
	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
	rdp->cpu = cpu;
	rcu_boot_init_nocb_percpu_data(rdp);
}

/*
 * Invoked early in the CPU-online process, when pretty much all services
 * are available. The incoming CPU is not present.
 *
 * Initializes a CPU's per-CPU RCU data. Note that only one online or
 * offline event can be happening at a given time. Note also that we can
 * accept some slop in the rcu_state.gp_seq access due to the fact that this
 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
 * And any offloaded callbacks are being numbered elsewhere.
 */
int rcutree_prepare_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rcu_get_root();

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
	rdp->blimit = blimit;
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !rcu_segcblist_is_offloaded(&rdp->cblist))
		rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
	rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */

	/*
	 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
	rdp->beenonline = true;	 /* We have now been online. */
	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
	rdp->gp_seq_needed = rdp->gp_seq;
	rdp->cpu_no_qs.b.norm = true;
	rdp->core_needs_qs = false;
	rdp->rcu_iw_pending = false;
	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_prepare_kthreads(cpu);
	rcu_spawn_cpu_nocb_kthread(cpu);

	return 0;
}

/*
 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
 */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}

/*
 * Near the end of the CPU-online process. Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
int rcutree_online_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask |= rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return 0; /* Too early in boot for scheduler work. */
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);

	// Stop-machine done, so allow nohz_full to disable tick.
	tick_dep_clear(TICK_DEP_BIT_RCU);
	return 0;
}

/*
 * Near the beginning of the process. The CPU is still very much alive
 * with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask &= ~rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	rcutree_affinity_setting(cpu, cpu);

	// nohz_full CPUs need the tick for stop-machine to work quickly
	tick_dep_set(TICK_DEP_BIT_RCU);
	return 0;
}

/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it. Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called. Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool newcpu;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	if (rdp->cpu_started)
		return;
	rdp->cpu_started = true;

	rnp = rdp->mynode;
	mask = rdp->grpmask;
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
	newcpu = !(rnp->expmaskinitnext & mask);
	rnp->expmaskinitnext |= mask;
	/* Allow lockless access for expedited grace periods. */
	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
		rcu_disable_urgency_upon_qs(rdp);
		/* Report QS -after- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}

/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the rcu_node tree's ->qsmaskinitnext bit masks.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_report_dead(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* QS for any half-done expedited grace period. */
	preempt_disable();
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
	preempt_enable();
	rcu_preempt_deferred_qs(current);

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	raw_spin_lock(&rcu_state.ofl_lock);
	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
		/* Report quiescent state -before- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	raw_spin_unlock(&rcu_state.ofl_lock);
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(rnp->ofl_seq & 0x1);

	rdp->cpu_started = false;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The outgoing CPU has just passed through the dying-idle state, and we
 * are being invoked from the CPU that was IPIed to continue the offline
 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
 */
void rcutree_migrate_callbacks(int cpu)
{
	unsigned long flags;
	struct rcu_data *my_rdp;
	struct rcu_node *my_rnp;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	bool needwake;

	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
	    rcu_segcblist_empty(&rdp->cblist))
		return;  /* No callbacks to migrate. */

	local_irq_save(flags);
	my_rdp = this_cpu_ptr(&rcu_data);
	my_rnp = my_rdp->mynode;
	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
	/* Leverage recent GPs and set GP for new callbacks. */
	needwake = rcu_advance_cbs(my_rnp, rdp) ||
		   rcu_advance_cbs(my_rnp, my_rdp);
	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
	rcu_segcblist_disable(&rdp->cblist);
	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
		__call_rcu_nocb_wake(my_rdp, true, flags);
	} else {
		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
	}
	if (needwake)
		rcu_gp_kthread_wake();
	lockdep_assert_irqs_enabled();
	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}
#endif

/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		rcu_unexpedite_gp();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * Spawn the kthreads that handle RCU's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	int kthread_prio_in = kthread_prio;
	struct rcu_node *rnp;
	struct sched_param sp;
	struct task_struct *t;

	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	if (kthread_prio) {
		sp.sched_priority = kthread_prio;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	rnp = rcu_get_root();
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	wake_up_process(t);
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	rcu_spawn_core_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's
 * initialization process. Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this idle
 * task is booting the system, and such primitives are no-ops). After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
 * runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}

/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];	/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i);
	}
}

/*
 * Compute the rcu_node tree geometry from kernel parameters. This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
void rcu_init_geometry(void)
{
	ulong d;
	int i;
	static unsigned long old_nr_cpu_ids;
	int rcu_capacity[RCU_NUM_LVLS];
	static bool initialized;

	if (initialized) {
		/*
		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
		 */
		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
		return;
	}

	old_nr_cpu_ids = nr_cpu_ids;
	initialized = true;

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, plus one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute the number of nodes that can be handled by an rcu_node
	 * tree with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];

		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
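
/*
 * Worked example (assuming RCU_FANOUT=64 and rcu_fanout_leaf=16 on a
 * system with nr_cpu_ids=100): rcu_capacity[] starts {16, 1024, ...},
 * so the level-counting loop above yields rcu_num_lvls=2; num_rcu_lvl[]
 * becomes {DIV_ROUND_UP(100, 1024), DIV_ROUND_UP(100, 16)} = {1, 7},
 * giving rcu_num_nodes = 8: one root rcu_node fanning out to seven
 * leaves of at most 16 CPUs each.
 */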

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

struct workqueue_struct *rcu_gp_wq;
struct workqueue_struct *rcu_par_gp_wq;

static void __init kfree_rcu_batch_init(void)
{
	int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		for (i = 0; i < KFREE_N_BATCHES; i++) {
			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
			krcp->krw_arr[i].krcp = krcp;
		}

		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
		INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
		krcp->initialized = true;
	}
	if (register_shrinker(&kfree_rcu_shrinker))
		pr_err("Failed to register kfree_rcu() shrinker!\n");
}

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	kfree_rcu_batch_init();
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one();
	if (dump_tree)
		rcu_dump_rcu_node_tree();
	if (use_softirq)
		open_softirq(RCU_SOFTIRQ, rcu_core_si);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		rcutree_online_cpu(cpu);
	}

	/* Create workqueue for expedited GPs and for Tree SRCU. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_par_gp_wq);
	srcu_init();

	/* Fill in default value for rcutree.qovld boot parameter. */
	/* -After- the rcu_node ->lock fields are initialized! */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;
}

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"