1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 *
5 * Copyright IBM Corporation, 2008
6 *
7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8 * Manfred Spraul <manfred@colorfullife.com>
9 * Paul E. McKenney <paulmck@linux.ibm.com>
10 *
11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13 *
14 * For detailed explanation of Read-Copy Update mechanism see -
15 * Documentation/RCU
16 */
17
18 #define pr_fmt(fmt) "rcu: " fmt
19
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/panic.h>
37 #include <linux/panic_notifier.h>
38 #include <linux/percpu.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/mutex.h>
42 #include <linux/time.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/wait.h>
45 #include <linux/kthread.h>
46 #include <uapi/linux/sched/types.h>
47 #include <linux/prefetch.h>
48 #include <linux/delay.h>
49 #include <linux/random.h>
50 #include <linux/trace_events.h>
51 #include <linux/suspend.h>
52 #include <linux/ftrace.h>
53 #include <linux/tick.h>
54 #include <linux/sysrq.h>
55 #include <linux/kprobes.h>
56 #include <linux/gfp.h>
57 #include <linux/oom.h>
58 #include <linux/smpboot.h>
59 #include <linux/jiffies.h>
60 #include <linux/slab.h>
61 #include <linux/sched/isolation.h>
62 #include <linux/sched/clock.h>
63 #include <linux/vmalloc.h>
64 #include <linux/mm.h>
65 #include <linux/kasan.h>
66 #include "../time/tick-internal.h"
67
68 #include "tree.h"
69 #include "rcu.h"
70
71 #ifdef MODULE_PARAM_PREFIX
72 #undef MODULE_PARAM_PREFIX
73 #endif
74 #define MODULE_PARAM_PREFIX "rcutree."
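/*
 * With the "rcutree." prefix above, each module_param() in this file shows
 * up under that name on the kernel command line and in sysfs. For
 * illustration (hypothetical values):
 *
 *	rcutree.gp_init_delay=3 rcutree.kthread_prio=2	(boot command line)
 *	/sys/module/rcutree/parameters/gp_init_delay	(runtime view)
 */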
75
76 /* Data structures. */
77
78 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
79 .dynticks_nesting = 1,
80 .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
81 .dynticks = ATOMIC_INIT(1),
82 #ifdef CONFIG_RCU_NOCB_CPU
83 .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
84 #endif
85 };
86 static struct rcu_state rcu_state = {
87 .level = { &rcu_state.node[0] },
88 .gp_state = RCU_GP_IDLE,
89 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
90 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
91 .name = RCU_NAME,
92 .abbr = RCU_ABBR,
93 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
94 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
95 .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
96 };
97
98 /* Dump rcu_node combining tree at boot to verify correct setup. */
99 static bool dump_tree;
100 module_param(dump_tree, bool, 0444);
101 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
102 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
103 #ifndef CONFIG_PREEMPT_RT
104 module_param(use_softirq, bool, 0444);
105 #endif
106 /* Control rcu_node-tree auto-balancing at boot time. */
107 static bool rcu_fanout_exact;
108 module_param(rcu_fanout_exact, bool, 0444);
109 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
110 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
111 module_param(rcu_fanout_leaf, int, 0444);
112 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
113 /* Number of rcu_nodes at specified level. */
114 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
115 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
116
117 /*
118 * The rcu_scheduler_active variable is initialized to the value
119 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
120 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
121 * RCU can assume that there is but one task, allowing RCU to (for example)
122 * optimize synchronize_rcu() to a simple barrier(). When this variable
123 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
124 * to detect real grace periods. This variable is also used to suppress
125 * boot-time false positives from lockdep-RCU error checking. Finally, it
126 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
127 * is fully initialized, including all of its kthreads having been spawned.
128 */
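/*
 * In short, the progression is RCU_SCHEDULER_INACTIVE (single-task early
 * boot) -> RCU_SCHEDULER_INIT (just before the first task is spawned) ->
 * RCU_SCHEDULER_RUNNING (RCU fully initialized, kthreads spawned).
 */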
129 int rcu_scheduler_active __read_mostly;
130 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
131
132 /*
133 * The rcu_scheduler_fully_active variable transitions from zero to one
134 * during the early_initcall() processing, which is after the scheduler
135 * is capable of creating new tasks. So RCU processing (for example,
136 * creating tasks for RCU priority boosting) must be delayed until after
137 * rcu_scheduler_fully_active transitions from zero to one. We also
138 * currently delay invocation of any RCU callbacks until after this point.
139 *
140 * It might later prove better for people registering RCU callbacks during
141 * early boot to take responsibility for these callbacks, but one step at
142 * a time.
143 */
144 static int rcu_scheduler_fully_active __read_mostly;
145
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
147 unsigned long gps, unsigned long flags);
148 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
150 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
151 static void invoke_rcu_core(void);
152 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void sync_sched_exp_online_cleanup(int cpu);
154 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
155 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
156
157 /* rcuc/rcub kthread realtime priority */
158 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
159 module_param(kthread_prio, int, 0444);
160
161 /* Delay in jiffies for grace-period initialization delays, debug only. */
162
163 static int gp_preinit_delay;
164 module_param(gp_preinit_delay, int, 0444);
165 static int gp_init_delay;
166 module_param(gp_init_delay, int, 0444);
167 static int gp_cleanup_delay;
168 module_param(gp_cleanup_delay, int, 0444);
169
170 // Add delay to rcu_read_unlock() for strict grace periods.
171 static int rcu_unlock_delay;
172 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
173 module_param(rcu_unlock_delay, int, 0444);
174 #endif
175
176 /*
177 * This rcu parameter is runtime-read-only. It reflects
178 * a minimum allowed number of objects which can be cached
179 * per-CPU. Object size is equal to one page. This value
180 * can be changed at boot time.
181 */
182 static int rcu_min_cached_objs = 5;
183 module_param(rcu_min_cached_objs, int, 0444);
184
185 // A page shrinker can ask for pages to be freed to make them
186 // available for other parts of the system. This usually happens
187 // under low memory conditions, and in that case we should also
188 // defer page-cache filling for a short time period.
189 //
190 // The default value is 5 seconds, which is long enough to reduce
191 // interference with the shrinker while it asks other systems to
192 // drain their caches.
193 static int rcu_delay_page_cache_fill_msec = 5000;
194 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
195
196 /* Retrieve RCU kthreads priority for rcutorture */
197 int rcu_get_gp_kthreads_prio(void)
198 {
199 return kthread_prio;
200 }
201 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
202
203 /*
204 * Number of grace periods between delays, normalized by the duration of
205 * the delay. The longer the delay, the more the grace periods between
206 * each delay. The reason for this normalization is that it means that,
207 * for non-zero delays, the overall slowdown of grace periods is constant
208 * regardless of the duration of the delay. This arrangement balances
209 * the need for long delays to increase some race probabilities with the
210 * need for fast grace periods to increase other race probabilities.
211 */
212 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
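/*
 * Illustrative arithmetic (assuming the debug delay is applied once every
 * PER_RCU_NODE_PERIOD * delay grace periods): a 5-jiffy delay then fires
 * every 15 grace periods and a 20-jiffy delay every 60 grace periods, so
 * either way the added latency averages one-third of a jiffy per grace
 * period.
 */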
213
214 /*
215 * Compute the mask of online CPUs for the specified rcu_node structure.
216 * This will not be stable unless the rcu_node structure's ->lock is
217 * held, but the bit corresponding to the current CPU will be stable
218 * in most contexts.
219 */
220 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
221 {
222 return READ_ONCE(rnp->qsmaskinitnext);
223 }
224
225 /*
226 * Return true if an RCU grace period is in progress. The READ_ONCE()s
227 * permit this function to be invoked without holding the root rcu_node
228 * structure's ->lock, but of course results can be subject to change.
229 */
230 static int rcu_gp_in_progress(void)
231 {
232 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
233 }
234
235 /*
236 * Return the number of callbacks queued on the specified CPU.
237 * Handles both the nocbs and normal cases.
238 */
239 static long rcu_get_n_cbs_cpu(int cpu)
240 {
241 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
242
243 if (rcu_segcblist_is_enabled(&rdp->cblist))
244 return rcu_segcblist_n_cbs(&rdp->cblist);
245 return 0;
246 }
247
248 void rcu_softirq_qs(void)
249 {
250 rcu_qs();
251 rcu_preempt_deferred_qs(current);
252 rcu_tasks_qs(current, false);
253 }
254
255 /*
256 * Increment the current CPU's rcu_data structure's ->dynticks field
257 * with ordering. Return the new value.
258 */
259 static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
260 {
261 return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
262 }
263
264 /*
265 * Record entry into an extended quiescent state. This is only to be
266 * called when not already in an extended quiescent state, that is,
267 * RCU is watching prior to the call to this function and is no longer
268 * watching upon return.
269 */
270 static noinstr void rcu_dynticks_eqs_enter(void)
271 {
272 int seq;
273
274 /*
275 * CPUs seeing atomic_add_return() must see prior RCU read-side
276 * critical sections, and we also must force ordering with the
277 * next idle sojourn.
278 */
279 rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
280 seq = rcu_dynticks_inc(1);
281 // RCU is no longer watching. Better be in extended quiescent state!
282 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
283 }
284
285 /*
286 * Record exit from an extended quiescent state. This is only to be
287 * called from an extended quiescent state, that is, RCU is not watching
288 * prior to the call to this function and is watching upon return.
289 */
290 static noinstr void rcu_dynticks_eqs_exit(void)
291 {
292 int seq;
293
294 /*
295 * CPUs seeing atomic_add_return() must see prior idle sojourns,
296 * and we also must force ordering with the next RCU read-side
297 * critical section.
298 */
299 seq = rcu_dynticks_inc(1);
300 // RCU is now watching. Better not be in an extended quiescent state!
301 rcu_dynticks_task_trace_exit(); // After ->dynticks update!
302 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
303 }
304
305 /*
306 * Reset the current CPU's ->dynticks counter to indicate that the
307 * newly onlined CPU is no longer in an extended quiescent state.
308 * This will either leave the counter unchanged, or increment it
309 * to the next non-quiescent value.
310 *
311 * The non-atomic test/increment sequence works because the upper bits
312 * of the ->dynticks counter are manipulated only by the corresponding CPU,
313 * or when the corresponding CPU is offline.
314 */
315 static void rcu_dynticks_eqs_online(void)
316 {
317 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
318
319 if (atomic_read(&rdp->dynticks) & 0x1)
320 return;
321 rcu_dynticks_inc(1);
322 }
323
324 /*
325 * Is the current CPU in an extended quiescent state?
326 *
327 * No ordering, as we are sampling CPU-local information.
328 */
329 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
330 {
331 return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
332 }
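/*
 * Quick reference for the ->dynticks parity convention used above: an odd
 * value means RCU is watching this CPU, an even value means the CPU is in
 * an extended quiescent state. The counter boots to 1 (odd, watching),
 * rcu_dynticks_eqs_enter() makes it even, and rcu_dynticks_eqs_exit()
 * makes it odd again.
 */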
333
334 /*
335 * Snapshot the ->dynticks counter with full ordering so as to allow
336 * stable comparison of this counter with past and future snapshots.
337 */
338 static int rcu_dynticks_snap(struct rcu_data *rdp)
339 {
340 smp_mb(); // Fundamental RCU ordering guarantee.
341 return atomic_read_acquire(&rdp->dynticks);
342 }
343
344 /*
345 * Return true if the snapshot returned from rcu_dynticks_snap()
346 * indicates that RCU is in an extended quiescent state.
347 */
348 static bool rcu_dynticks_in_eqs(int snap)
349 {
350 return !(snap & 0x1);
351 }
352
353 /* Return true if the specified CPU is currently idle from an RCU viewpoint. */
354 bool rcu_is_idle_cpu(int cpu)
355 {
356 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
357
358 return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
359 }
360
361 /*
362 * Return true if the CPU corresponding to the specified rcu_data
363 * structure has spent some time in an extended quiescent state since
364 * rcu_dynticks_snap() returned the specified snapshot.
365 */
366 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
367 {
368 return snap != rcu_dynticks_snap(rdp);
369 }
370
371 /*
372 * Return true if the referenced integer is zero while the specified
373 * CPU remains within a single extended quiescent state.
374 */
375 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
376 {
377 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
378 int snap;
379
380 // If not quiescent, force back to earlier extended quiescent state.
381 snap = atomic_read(&rdp->dynticks) & ~0x1;
382
383 smp_rmb(); // Order ->dynticks and *vp reads.
384 if (READ_ONCE(*vp))
385 return false; // Non-zero, so report failure;
386 smp_rmb(); // Order *vp read and ->dynticks re-read.
387
388 // If still in the same extended quiescent state, we are good!
389 return snap == atomic_read(&rdp->dynticks);
390 }
391
392 /*
393 * Let the RCU core know that this CPU has gone through the scheduler,
394 * which is a quiescent state. This is called when the need for a
395 * quiescent state is urgent, so we burn an atomic operation and full
396 * memory barriers to let the RCU core know about it, regardless of what
397 * this CPU might (or might not) do in the near future.
398 *
399 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
400 *
401 * The caller must have disabled interrupts and must not be idle.
402 */
403 notrace void rcu_momentary_dyntick_idle(void)
404 {
405 int seq;
406
407 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
408 seq = rcu_dynticks_inc(2);
409 /* It is illegal to call this from idle state. */
410 WARN_ON_ONCE(!(seq & 0x1));
411 rcu_preempt_deferred_qs(current);
412 }
413 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
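/*
 * For example (hypothetical values): if ->dynticks is 5 (odd, RCU watching)
 * when rcu_momentary_dyntick_idle() runs, it becomes 7, still odd, so RCU
 * never appears to go idle, yet any earlier rcu_dynticks_snap() snapshot
 * now compares unequal and rcu_dynticks_in_eqs_since() credits this CPU
 * with a quiescent state.
 */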
414
415 /**
416 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
417 *
418 * If the current CPU is idle and running at a first-level (not nested)
419 * interrupt, or directly from idle, return true.
420 *
421 * The caller must have at least disabled IRQs.
422 */
423 static int rcu_is_cpu_rrupt_from_idle(void)
424 {
425 long nesting;
426
427 /*
428 * Usually called from the tick; but also used from smp_function_call()
429 * for expedited grace periods. This latter can result in running from
430 * the idle task, instead of an actual IPI.
431 */
432 lockdep_assert_irqs_disabled();
433
434 /* Check for counter underflows */
435 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
436 "RCU dynticks_nesting counter underflow!");
437 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
438 "RCU dynticks_nmi_nesting counter underflow/zero!");
439
440 /* Are we at first interrupt nesting level? */
441 nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
442 if (nesting > 1)
443 return false;
444
445 /*
446 * If we're not in an interrupt, we must be in the idle task!
447 */
448 WARN_ON_ONCE(!nesting && !is_idle_task(current));
449
450 /* Does CPU appear to be idle from an RCU standpoint? */
451 return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
452 }
453
454 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
455 // Maximum callbacks per rcu_do_batch ...
456 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
457 static long blimit = DEFAULT_RCU_BLIMIT;
458 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
459 static long qhimark = DEFAULT_RCU_QHIMARK;
460 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
461 static long qlowmark = DEFAULT_RCU_QLOMARK;
462 #define DEFAULT_RCU_QOVLD_MULT 2
463 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
464 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
465 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
466
467 module_param(blimit, long, 0444);
468 module_param(qhimark, long, 0444);
469 module_param(qlowmark, long, 0444);
470 module_param(qovld, long, 0444);
471
472 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
473 static ulong jiffies_till_next_fqs = ULONG_MAX;
474 static bool rcu_kick_kthreads;
475 static int rcu_divisor = 7;
476 module_param(rcu_divisor, int, 0644);
477
478 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
479 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
480 module_param(rcu_resched_ns, long, 0644);
481
482 /*
483 * How long the grace period must be before we start recruiting
484 * quiescent-state help from rcu_note_context_switch().
485 */
486 static ulong jiffies_till_sched_qs = ULONG_MAX;
487 module_param(jiffies_till_sched_qs, ulong, 0444);
488 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
489 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
490
491 /*
492 * Make sure that we give the grace-period kthread time to detect any
493 * idle CPUs before taking active measures to force quiescent states.
494 * However, don't go below 100 milliseconds, adjusted upwards for really
495 * large systems.
496 */
497 static void adjust_jiffies_till_sched_qs(void)
498 {
499 unsigned long j;
500
501 /* If jiffies_till_sched_qs was specified, respect the request. */
502 if (jiffies_till_sched_qs != ULONG_MAX) {
503 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
504 return;
505 }
506 /* Otherwise, set to third fqs scan, but bound below on large system. */
507 j = READ_ONCE(jiffies_till_first_fqs) +
508 2 * READ_ONCE(jiffies_till_next_fqs);
509 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
510 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
511 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
512 WRITE_ONCE(jiffies_to_sched_qs, j);
513 }
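/*
 * Worked example (hypothetical values): with jiffies_till_sched_qs left at
 * ULONG_MAX, jiffies_till_first_fqs == 3, and jiffies_till_next_fqs == 3,
 * the computed j is 3 + 2 * 3 = 9 jiffies, which is then raised to the
 * HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV floor (at least 100
 * milliseconds) before being stored into jiffies_to_sched_qs.
 */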
514
515 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
516 {
517 ulong j;
518 int ret = kstrtoul(val, 0, &j);
519
520 if (!ret) {
521 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
522 adjust_jiffies_till_sched_qs();
523 }
524 return ret;
525 }
526
527 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
528 {
529 ulong j;
530 int ret = kstrtoul(val, 0, &j);
531
532 if (!ret) {
533 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
534 adjust_jiffies_till_sched_qs();
535 }
536 return ret;
537 }
538
539 static const struct kernel_param_ops first_fqs_jiffies_ops = {
540 .set = param_set_first_fqs_jiffies,
541 .get = param_get_ulong,
542 };
543
544 static const struct kernel_param_ops next_fqs_jiffies_ops = {
545 .set = param_set_next_fqs_jiffies,
546 .get = param_get_ulong,
547 };
548
549 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
550 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
551 module_param(rcu_kick_kthreads, bool, 0644);
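/*
 * Because jiffies_till_first_fqs and jiffies_till_next_fqs are registered
 * with mode 0644, they can also be changed at runtime, for example
 * (illustrative only):
 *
 *	echo 2 > /sys/module/rcutree/parameters/jiffies_till_first_fqs
 *
 * in which case the set() callbacks above clamp the value to HZ and then
 * recompute jiffies_to_sched_qs via adjust_jiffies_till_sched_qs().
 */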
552
553 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
554 static int rcu_pending(int user);
555
556 /*
557 * Return the number of RCU GPs completed thus far for debug & stats.
558 */
559 unsigned long rcu_get_gp_seq(void)
560 {
561 return READ_ONCE(rcu_state.gp_seq);
562 }
563 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
564
565 /*
566 * Return the number of RCU expedited batches completed thus far for
567 * debug & stats. Odd numbers mean that a batch is in progress, even
568 * numbers mean idle. The value returned will thus be roughly double
569 * the cumulative batches since boot.
570 */
571 unsigned long rcu_exp_batches_completed(void)
572 {
573 return rcu_state.expedited_sequence;
574 }
575 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
576
577 /*
578 * Return the root node of the rcu_state structure.
579 */
580 static struct rcu_node *rcu_get_root(void)
581 {
582 return &rcu_state.node[0];
583 }
584
585 /*
586 * Send along grace-period-related data for rcutorture diagnostics.
587 */
588 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
589 unsigned long *gp_seq)
590 {
591 switch (test_type) {
592 case RCU_FLAVOR:
593 *flags = READ_ONCE(rcu_state.gp_flags);
594 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
595 break;
596 default:
597 break;
598 }
599 }
600 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
601
602 /*
603 * Enter an RCU extended quiescent state, which can be either the
604 * idle loop or adaptive-tickless usermode execution.
605 *
606 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
607 * the possibility of usermode upcalls having messed up our count
608 * of interrupt nesting level during the prior busy period.
609 */
610 static noinstr void rcu_eqs_enter(bool user)
611 {
612 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
613
614 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
615 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
616 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
617 rdp->dynticks_nesting == 0);
618 if (rdp->dynticks_nesting != 1) {
619 // RCU will still be watching, so just do accounting and leave.
620 rdp->dynticks_nesting--;
621 return;
622 }
623
624 lockdep_assert_irqs_disabled();
625 instrumentation_begin();
626 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
627 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
628 rcu_prepare_for_idle();
629 rcu_preempt_deferred_qs(current);
630
631 // instrumentation for the noinstr rcu_dynticks_eqs_enter()
632 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
633
634 instrumentation_end();
635 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
636 // RCU is watching here ...
637 rcu_dynticks_eqs_enter();
638 // ... but is no longer watching here.
639 rcu_dynticks_task_enter();
640 }
641
642 /**
643 * rcu_idle_enter - inform RCU that current CPU is entering idle
644 *
645 * Enter idle mode, in other words, -leave- the mode in which RCU
646 * read-side critical sections can occur. (Though RCU read-side
647 * critical sections can occur in irq handlers in idle, a possibility
648 * handled by irq_enter() and irq_exit().)
649 *
650 * If you add or remove a call to rcu_idle_enter(), be sure to test with
651 * CONFIG_RCU_EQS_DEBUG=y.
652 */
653 void rcu_idle_enter(void)
654 {
655 lockdep_assert_irqs_disabled();
656 rcu_eqs_enter(false);
657 }
658 EXPORT_SYMBOL_GPL(rcu_idle_enter);
659
660 #ifdef CONFIG_NO_HZ_FULL
661
662 #if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
663 /*
664 * An empty function that will trigger a reschedule on
665 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
666 */
667 static void late_wakeup_func(struct irq_work *work)
668 {
669 }
670
671 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
672 IRQ_WORK_INIT(late_wakeup_func);
673
674 /*
675 * If either:
676 *
677 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
678 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
679 *
680 * In these cases the late RCU wake ups aren't supported in the resched loops and our
681 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
682 * get re-enabled again.
683 */
684 noinstr static void rcu_irq_work_resched(void)
685 {
686 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
687
688 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
689 return;
690
691 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
692 return;
693
694 instrumentation_begin();
695 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
696 irq_work_queue(this_cpu_ptr(&late_wakeup_work));
697 }
698 instrumentation_end();
699 }
700
701 #else
702 static inline void rcu_irq_work_resched(void) { }
703 #endif
704
705 /**
706 * rcu_user_enter - inform RCU that we are resuming userspace.
707 *
708 * Enter RCU idle mode right before resuming userspace. No use of RCU
709 * is permitted between this call and rcu_user_exit(). This way the
710 * CPU doesn't need to maintain the tick for RCU maintenance purposes
711 * when the CPU runs in userspace.
712 *
713 * If you add or remove a call to rcu_user_enter(), be sure to test with
714 * CONFIG_RCU_EQS_DEBUG=y.
715 */
716 noinstr void rcu_user_enter(void)
717 {
718 lockdep_assert_irqs_disabled();
719
720 /*
721 * Other than generic entry implementation, we may be past the last
722 * rescheduling opportunity in the entry code. Trigger a self IPI
723 * that will fire and reschedule once we resume in user/guest mode.
724 */
725 rcu_irq_work_resched();
726 rcu_eqs_enter(true);
727 }
728
729 #endif /* CONFIG_NO_HZ_FULL */
730
731 /**
732 * rcu_nmi_exit - inform RCU of exit from NMI context
733 *
734 * If we are returning from the outermost NMI handler that interrupted an
735 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
736 * to let the RCU grace-period handling know that the CPU is back to
737 * being RCU-idle.
738 *
739 * If you add or remove a call to rcu_nmi_exit(), be sure to test
740 * with CONFIG_RCU_EQS_DEBUG=y.
741 */
742 noinstr void rcu_nmi_exit(void)
743 {
744 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
745
746 instrumentation_begin();
747 /*
748 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
749 * (We are exiting an NMI handler, so RCU better be paying attention
750 * to us!)
751 */
752 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
753 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
754
755 /*
756 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
757 * leave it in non-RCU-idle state.
758 */
759 if (rdp->dynticks_nmi_nesting != 1) {
760 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
761 atomic_read(&rdp->dynticks));
762 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
763 rdp->dynticks_nmi_nesting - 2);
764 instrumentation_end();
765 return;
766 }
767
768 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
769 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
770 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
771
772 if (!in_nmi())
773 rcu_prepare_for_idle();
774
775 // instrumentation for the noinstr rcu_dynticks_eqs_enter()
776 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
777 instrumentation_end();
778
779 // RCU is watching here ...
780 rcu_dynticks_eqs_enter();
781 // ... but is no longer watching here.
782
783 if (!in_nmi())
784 rcu_dynticks_task_enter();
785 }
786
787 /**
788 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
789 *
790 * Exit from an interrupt handler, which might possibly result in entering
791 * idle mode, in other words, leaving the mode in which read-side critical
792 * sections can occur. The caller must have disabled interrupts.
793 *
794 * This code assumes that the idle loop never does anything that might
795 * result in unbalanced calls to irq_enter() and irq_exit(). If your
796 * architecture's idle loop violates this assumption, RCU will give you what
797 * you deserve, good and hard. But very infrequently and irreproducibly.
798 *
799 * Use things like work queues to work around this limitation.
800 *
801 * You have been warned.
802 *
803 * If you add or remove a call to rcu_irq_exit(), be sure to test with
804 * CONFIG_RCU_EQS_DEBUG=y.
805 */
806 void noinstr rcu_irq_exit(void)
807 {
808 lockdep_assert_irqs_disabled();
809 rcu_nmi_exit();
810 }
811
812 #ifdef CONFIG_PROVE_RCU
813 /**
814 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
815 */
816 void rcu_irq_exit_check_preempt(void)
817 {
818 lockdep_assert_irqs_disabled();
819
820 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
821 "RCU dynticks_nesting counter underflow/zero!");
822 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
823 DYNTICK_IRQ_NONIDLE,
824 "Bad RCU dynticks_nmi_nesting counter\n");
825 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
826 "RCU in extended quiescent state!");
827 }
828 #endif /* #ifdef CONFIG_PROVE_RCU */
829
830 /*
831 * Wrapper for rcu_irq_exit() where interrupts are enabled.
832 *
833 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
834 * with CONFIG_RCU_EQS_DEBUG=y.
835 */
836 void rcu_irq_exit_irqson(void)
837 {
838 unsigned long flags;
839
840 local_irq_save(flags);
841 rcu_irq_exit();
842 local_irq_restore(flags);
843 }
844
845 /*
846 * Exit an RCU extended quiescent state, which can be either the
847 * idle loop or adaptive-tickless usermode execution.
848 *
849 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
850 * allow for the possibility of usermode upcalls messing up our count of
851 * interrupt nesting level during the busy period that is just now starting.
852 */
853 static void noinstr rcu_eqs_exit(bool user)
854 {
855 struct rcu_data *rdp;
856 long oldval;
857
858 lockdep_assert_irqs_disabled();
859 rdp = this_cpu_ptr(&rcu_data);
860 oldval = rdp->dynticks_nesting;
861 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
862 if (oldval) {
863 // RCU was already watching, so just do accounting and leave.
864 rdp->dynticks_nesting++;
865 return;
866 }
867 rcu_dynticks_task_exit();
868 // RCU is not watching here ...
869 rcu_dynticks_eqs_exit();
870 // ... but is watching here.
871 instrumentation_begin();
872
873 // instrumentation for the noinstr rcu_dynticks_eqs_exit()
874 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
875
876 rcu_cleanup_after_idle();
877 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
878 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
879 WRITE_ONCE(rdp->dynticks_nesting, 1);
880 WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
881 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
882 instrumentation_end();
883 }
884
885 /**
886 * rcu_idle_exit - inform RCU that current CPU is leaving idle
887 *
888 * Exit idle mode, in other words, -enter- the mode in which RCU
889 * read-side critical sections can occur.
890 *
891 * If you add or remove a call to rcu_idle_exit(), be sure to test with
892 * CONFIG_RCU_EQS_DEBUG=y.
893 */
894 void rcu_idle_exit(void)
895 {
896 unsigned long flags;
897
898 local_irq_save(flags);
899 rcu_eqs_exit(false);
900 local_irq_restore(flags);
901 }
902 EXPORT_SYMBOL_GPL(rcu_idle_exit);
903
904 #ifdef CONFIG_NO_HZ_FULL
905 /**
906 * rcu_user_exit - inform RCU that we are exiting userspace.
907 *
908 * Exit RCU idle mode while entering the kernel because it can
909 * run an RCU read-side critical section at any time.
910 *
911 * If you add or remove a call to rcu_user_exit(), be sure to test with
912 * CONFIG_RCU_EQS_DEBUG=y.
913 */
914 void noinstr rcu_user_exit(void)
915 {
916 rcu_eqs_exit(true);
917 }
918
919 /**
920 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
921 *
922 * The scheduler tick is not normally enabled when CPUs enter the kernel
923 * from nohz_full userspace execution. After all, nohz_full userspace
924 * execution is an RCU quiescent state and the time executing in the kernel
925 * is quite short. Except of course when it isn't. And it is not hard to
926 * cause a large system to spend tens of seconds or even minutes looping
927 * in the kernel, which can cause a number of problems, including RCU CPU
928 * stall warnings.
929 *
930 * Therefore, if a nohz_full CPU fails to report a quiescent state
931 * in a timely manner, the RCU grace-period kthread sets that CPU's
932 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
933 * exception will invoke this function, which will turn on the scheduler
934 * tick, which will enable RCU to detect that CPU's quiescent states,
935 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
936 * The tick will be disabled once a quiescent state is reported for
937 * this CPU.
938 *
939 * Of course, in carefully tuned systems, there might never be an
940 * interrupt or exception. In that case, the RCU grace-period kthread
941 * will eventually cause one to happen. However, in less carefully
942 * controlled environments, this function allows RCU to get what it
943 * needs without creating otherwise useless interruptions.
944 */
945 void __rcu_irq_enter_check_tick(void)
946 {
947 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
948
949 // If we're here from NMI there's nothing to do.
950 if (in_nmi())
951 return;
952
953 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
954 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
955
956 if (!tick_nohz_full_cpu(rdp->cpu) ||
957 !READ_ONCE(rdp->rcu_urgent_qs) ||
958 READ_ONCE(rdp->rcu_forced_tick)) {
959 // RCU doesn't need nohz_full help from this CPU, or it is
960 // already getting that help.
961 return;
962 }
963
964 // We get here only when not in an extended quiescent state and
965 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
966 // already watching and (2) The fact that we are in an interrupt
967 // handler and that the rcu_node lock is an irq-disabled lock
968 // prevents self-deadlock. So we can safely recheck under the lock.
969 // Note that the nohz_full state currently cannot change.
970 raw_spin_lock_rcu_node(rdp->mynode);
971 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
972 // A nohz_full CPU is in the kernel and RCU needs a
973 // quiescent state. Turn on the tick!
974 WRITE_ONCE(rdp->rcu_forced_tick, true);
975 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
976 }
977 raw_spin_unlock_rcu_node(rdp->mynode);
978 }
979 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
980 #endif /* CONFIG_NO_HZ_FULL */
981
982 /**
983 * rcu_nmi_enter - inform RCU of entry to NMI context
984 *
985 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
986 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
987 * that the CPU is active. This implementation permits nested NMIs, as
988 * long as the nesting level does not overflow an int. (You will probably
989 * run out of stack space first.)
990 *
991 * If you add or remove a call to rcu_nmi_enter(), be sure to test
992 * with CONFIG_RCU_EQS_DEBUG=y.
993 */
994 noinstr void rcu_nmi_enter(void)
995 {
996 long incby = 2;
997 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
998
999 /* Complain about underflow. */
1000 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
1001
1002 /*
1003 * If idle from RCU viewpoint, atomically increment ->dynticks
1004 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
1005 * Otherwise, increment ->dynticks_nmi_nesting by two. This means
1006 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
1007 * to be in the outermost NMI handler that interrupted an RCU-idle
1008 * period (observation due to Andy Lutomirski).
1009 */
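/*
 * For example (hypothetical nesting): an NMI arriving while this CPU is
 * RCU-idle leaves ->dynticks_nmi_nesting at 1; a second, nested NMI raises
 * it to 3; each rcu_nmi_exit() then subtracts 2, and only the final exit
 * (from 1 to 0) returns the CPU to RCU-idle.
 */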
1010 if (rcu_dynticks_curr_cpu_in_eqs()) {
1011
1012 if (!in_nmi())
1013 rcu_dynticks_task_exit();
1014
1015 // RCU is not watching here ...
1016 rcu_dynticks_eqs_exit();
1017 // ... but is watching here.
1018
1019 if (!in_nmi()) {
1020 instrumentation_begin();
1021 rcu_cleanup_after_idle();
1022 instrumentation_end();
1023 }
1024
1025 instrumentation_begin();
1026 // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1027 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1028 // instrumentation for the noinstr rcu_dynticks_eqs_exit()
1029 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1030
1031 incby = 1;
1032 } else if (!in_nmi()) {
1033 instrumentation_begin();
1034 rcu_irq_enter_check_tick();
1035 } else {
1036 instrumentation_begin();
1037 }
1038
1039 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1040 rdp->dynticks_nmi_nesting,
1041 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1042 instrumentation_end();
1043 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1044 rdp->dynticks_nmi_nesting + incby);
1045 barrier();
1046 }
1047
1048 /**
1049 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1050 *
1051 * Enter an interrupt handler, which might possibly result in exiting
1052 * idle mode, in other words, entering the mode in which read-side critical
1053 * sections can occur. The caller must have disabled interrupts.
1054 *
1055 * Note that the Linux kernel is fully capable of entering an interrupt
1056 * handler that it never exits, for example when doing upcalls to user mode!
1057 * This code assumes that the idle loop never does upcalls to user mode.
1058 * If your architecture's idle loop does do upcalls to user mode (or does
1059 * anything else that results in unbalanced calls to the irq_enter() and
1060 * irq_exit() functions), RCU will give you what you deserve, good and hard.
1061 * But very infrequently and irreproducibly.
1062 *
1063 * Use things like work queues to work around this limitation.
1064 *
1065 * You have been warned.
1066 *
1067 * If you add or remove a call to rcu_irq_enter(), be sure to test with
1068 * CONFIG_RCU_EQS_DEBUG=y.
1069 */
1070 noinstr void rcu_irq_enter(void)
1071 {
1072 lockdep_assert_irqs_disabled();
1073 rcu_nmi_enter();
1074 }
1075
1076 /*
1077 * Wrapper for rcu_irq_enter() where interrupts are enabled.
1078 *
1079 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1080 * with CONFIG_RCU_EQS_DEBUG=y.
1081 */
1082 void rcu_irq_enter_irqson(void)
1083 {
1084 unsigned long flags;
1085
1086 local_irq_save(flags);
1087 rcu_irq_enter();
1088 local_irq_restore(flags);
1089 }
1090
1091 /*
1092 * If any sort of urgency was applied to the current CPU (for example,
1093 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1094 * to get to a quiescent state, disable it.
1095 */
1096 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1097 {
1098 raw_lockdep_assert_held_rcu_node(rdp->mynode);
1099 WRITE_ONCE(rdp->rcu_urgent_qs, false);
1100 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1101 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1102 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1103 WRITE_ONCE(rdp->rcu_forced_tick, false);
1104 }
1105 }
1106
1107 /**
1108 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1109 *
1110 * Return true if RCU is watching the running CPU, which means that this
1111 * CPU can safely enter RCU read-side critical sections. In other words,
1112 * if the current CPU is not in its idle loop or is in an interrupt or
1113 * NMI handler, return true.
1114 *
1115 * Make notrace because it can be called by the internal functions of
1116 * ftrace, and making this notrace removes unnecessary recursion calls.
1117 */
1118 notrace bool rcu_is_watching(void)
1119 {
1120 bool ret;
1121
1122 preempt_disable_notrace();
1123 ret = !rcu_dynticks_curr_cpu_in_eqs();
1124 preempt_enable_notrace();
1125 return ret;
1126 }
1127 EXPORT_SYMBOL_GPL(rcu_is_watching);
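/*
 * Typical usage sketch (illustrative, not taken from this file): code that
 * might run from the idle loop or early entry paths checks
 * rcu_is_watching() before relying on RCU readers:
 *
 *	if (!rcu_is_watching())
 *		return;		// RCU readers are unsafe here.
 *	rcu_read_lock();
 *	// ... dereference RCU-protected data ...
 *	rcu_read_unlock();
 */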
1128
1129 /*
1130 * If a holdout task is actually running, request an urgent quiescent
1131 * state from its CPU. This is unsynchronized, so migrations can cause
1132 * the request to go to the wrong CPU. Which is OK, all that will happen
1133 * is that the CPU's next context switch will be a bit slower and next
1134 * time around this task will generate another request.
1135 */
1136 void rcu_request_urgent_qs_task(struct task_struct *t)
1137 {
1138 int cpu;
1139
1140 barrier();
1141 cpu = task_cpu(t);
1142 if (!task_curr(t))
1143 return; /* This task is not running on that CPU. */
1144 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1145 }
1146
1147 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1148
1149 /*
1150 * Is the current CPU online as far as RCU is concerned?
1151 *
1152 * Disable preemption to avoid false positives that could otherwise
1153 * happen due to the current CPU number being sampled, this task being
1154 * preempted, its old CPU being taken offline, resuming on some other CPU,
1155 * then determining that its old CPU is now offline.
1156 *
1157 * Disable checking if in an NMI handler because we cannot safely
1158 * report errors from NMI handlers anyway. In addition, it is OK to use
1159 * RCU on an offline processor during initial boot, hence the check for
1160 * rcu_scheduler_fully_active.
1161 */
1162 bool rcu_lockdep_current_cpu_online(void)
1163 {
1164 struct rcu_data *rdp;
1165 struct rcu_node *rnp;
1166 bool ret = false;
1167
1168 if (in_nmi() || !rcu_scheduler_fully_active)
1169 return true;
1170 preempt_disable_notrace();
1171 rdp = this_cpu_ptr(&rcu_data);
1172 rnp = rdp->mynode;
1173 if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
1174 ret = true;
1175 preempt_enable_notrace();
1176 return ret;
1177 }
1178 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1179
1180 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1181
1182 /*
1183 * When trying to report a quiescent state on behalf of some other CPU,
1184 * it is our responsibility to check for and handle potential overflow
1185 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1186 * After all, the CPU might be in deep idle state, and thus executing no
1187 * code whatsoever.
1188 */
1189 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1190 {
1191 raw_lockdep_assert_held_rcu_node(rnp);
1192 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1193 rnp->gp_seq))
1194 WRITE_ONCE(rdp->gpwrap, true);
1195 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1196 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
1197 }
1198
1199 /*
1200 * Snapshot the specified CPU's dynticks counter so that we can later
1201 * credit them with an implicit quiescent state. Return 1 if this CPU
1202 * is in dynticks idle mode, which is an extended quiescent state.
1203 */
1204 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1205 {
1206 rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1207 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1208 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1209 rcu_gpnum_ovf(rdp->mynode, rdp);
1210 return 1;
1211 }
1212 return 0;
1213 }
1214
1215 /*
1216 * Return true if the specified CPU has passed through a quiescent
1217 * state by virtue of being in or having passed through a dynticks
1218 * idle state since the last call to dyntick_save_progress_counter()
1219 * for this same CPU, or by virtue of having been offline.
1220 */
1221 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1222 {
1223 unsigned long jtsq;
1224 bool *rnhqp;
1225 bool *ruqp;
1226 struct rcu_node *rnp = rdp->mynode;
1227
1228 /*
1229 * If the CPU passed through or entered a dynticks idle phase with
1230 * no active irq/NMI handlers, then we can safely pretend that the CPU
1231 * already acknowledged the request to pass through a quiescent
1232 * state. Either way, that CPU cannot possibly be in an RCU
1233 * read-side critical section that started before the beginning
1234 * of the current RCU grace period.
1235 */
1236 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1237 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1238 rcu_gpnum_ovf(rnp, rdp);
1239 return 1;
1240 }
1241
1242 /*
1243 * Complain if a CPU that is considered to be offline from RCU's
1244 * perspective has not yet reported a quiescent state. After all,
1245 * the offline CPU should have reported a quiescent state during
1246 * the CPU-offline process, or, failing that, by rcu_gp_init()
1247 * if it ran concurrently with either the CPU going offline or the
1248 * last task on a leaf rcu_node structure exiting its RCU read-side
1249 * critical section while all CPUs corresponding to that structure
1250 * are offline. This added warning detects bugs in any of these
1251 * code paths.
1252 *
1253 * The rcu_node structure's ->lock is held here, which excludes
1254 * the relevant portions of the CPU-hotplug code, the grace-period
1255 * initialization code, and the rcu_read_unlock() code paths.
1256 *
1257 * For more detail, please refer to the "Hotplug CPU" section
1258 * of RCU's Requirements documentation.
1259 */
1260 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1261 bool onl;
1262 struct rcu_node *rnp1;
1263
1264 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1265 __func__, rnp->grplo, rnp->grphi, rnp->level,
1266 (long)rnp->gp_seq, (long)rnp->completedqs);
1267 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1268 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1269 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1270 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1271 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1272 __func__, rdp->cpu, ".o"[onl],
1273 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1274 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1275 return 1; /* Break things loose after complaining. */
1276 }
1277
1278 /*
1279 * A CPU running for an extended time within the kernel can
1280 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1281 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1282 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1283 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1284 * variable are safe because the assignments are repeated if this
1285 * CPU failed to pass through a quiescent state. This code
1286 * also checks .jiffies_resched in case jiffies_to_sched_qs
1287 * is set way high.
1288 */
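/*
 * Illustrative timeline (assuming jiffies_to_sched_qs == 100): once the
 * grace period is 100 jiffies old, only .rcu_urgent_qs is set below; once
 * it is 200 jiffies old, .rcu_need_heavy_qs is set as well, with
 * .jiffies_resched and callback overload (.cbovld) acting as further
 * triggers for that heavier-weight path.
 */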
1289 jtsq = READ_ONCE(jiffies_to_sched_qs);
1290 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1291 rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu);
1292 if (!READ_ONCE(*rnhqp) &&
1293 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1294 time_after(jiffies, rcu_state.jiffies_resched) ||
1295 rcu_state.cbovld)) {
1296 WRITE_ONCE(*rnhqp, true);
1297 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1298 smp_store_release(ruqp, true);
1299 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1300 WRITE_ONCE(*ruqp, true);
1301 }
1302
1303 /*
1304 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1305 * The above code handles this, but only for straight cond_resched().
1306 * And some in-kernel loops check need_resched() before calling
1307 * cond_resched(), which defeats the above code for CPUs that are
1308 * running in-kernel with scheduling-clock interrupts disabled.
1309 * So hit them over the head with the resched_cpu() hammer!
1310 */
1311 if (tick_nohz_full_cpu(rdp->cpu) &&
1312 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1313 rcu_state.cbovld)) {
1314 WRITE_ONCE(*ruqp, true);
1315 resched_cpu(rdp->cpu);
1316 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1317 }
1318
1319 /*
1320 * If more than halfway to RCU CPU stall-warning time, invoke
1321 * resched_cpu() more frequently to try to loosen things up a bit.
1322 * Also check to see if the CPU is getting hammered with interrupts,
1323 * but only once per grace period, just to keep the IPIs down to
1324 * a dull roar.
1325 */
1326 if (time_after(jiffies, rcu_state.jiffies_resched)) {
1327 if (time_after(jiffies,
1328 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1329 resched_cpu(rdp->cpu);
1330 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1331 }
1332 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1333 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1334 (rnp->ffmask & rdp->grpmask)) {
1335 rdp->rcu_iw_pending = true;
1336 rdp->rcu_iw_gp_seq = rnp->gp_seq;
1337 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1338 }
1339 }
1340
1341 return 0;
1342 }
1343
1344 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
1345 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1346 unsigned long gp_seq_req, const char *s)
1347 {
1348 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1349 gp_seq_req, rnp->level,
1350 rnp->grplo, rnp->grphi, s);
1351 }
1352
1353 /*
1354 * rcu_start_this_gp - Request the start of a particular grace period
1355 * @rnp_start: The leaf node of the CPU from which to start.
1356 * @rdp: The rcu_data corresponding to the CPU from which to start.
1357 * @gp_seq_req: The gp_seq of the grace period to start.
1358 *
1359 * Start the specified grace period, as needed to handle newly arrived
1360 * callbacks. The required future grace periods are recorded in each
1361 * rcu_node structure's ->gp_seq_needed field. Returns true if there
1362 * is reason to awaken the grace-period kthread.
1363 *
1364 * The caller must hold the specified rcu_node structure's ->lock, which
1365 * is why the caller is responsible for waking the grace-period kthread.
1366 *
1367 * Returns true if the GP thread needs to be awakened else false.
1368 */
1369 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1370 unsigned long gp_seq_req)
1371 {
1372 bool ret = false;
1373 struct rcu_node *rnp;
1374
1375 /*
1376 * Use funnel locking to either acquire the root rcu_node
1377 * structure's lock or bail out if the need for this grace period
1378 * has already been recorded -- or if that grace period has in
1379 * fact already started. If there is already a grace period in
1380 * progress in a non-leaf node, no recording is needed because the
1381 * end of the grace period will scan the leaf rcu_node structures.
1382 * Note that rnp_start->lock must not be released.
1383 */
1384 raw_lockdep_assert_held_rcu_node(rnp_start);
1385 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1386 for (rnp = rnp_start; 1; rnp = rnp->parent) {
1387 if (rnp != rnp_start)
1388 raw_spin_lock_rcu_node(rnp);
1389 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1390 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1391 (rnp != rnp_start &&
1392 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1393 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1394 TPS("Prestarted"));
1395 goto unlock_out;
1396 }
1397 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1398 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1399 /*
1400 * We just marked the leaf or internal node, and a
1401 * grace period is in progress, which means that
1402 * rcu_gp_cleanup() will see the marking. Bail to
1403 * reduce contention.
1404 */
1405 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1406 TPS("Startedleaf"));
1407 goto unlock_out;
1408 }
1409 if (rnp != rnp_start && rnp->parent != NULL)
1410 raw_spin_unlock_rcu_node(rnp);
1411 if (!rnp->parent)
1412 break; /* At root, and perhaps also leaf. */
1413 }
1414
1415 /* If GP already in progress, just leave, otherwise start one. */
1416 if (rcu_gp_in_progress()) {
1417 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1418 goto unlock_out;
1419 }
1420 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1421 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1422 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1423 if (!READ_ONCE(rcu_state.gp_kthread)) {
1424 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1425 goto unlock_out;
1426 }
1427 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1428 ret = true; /* Caller must wake GP kthread. */
1429 unlock_out:
1430 /* Push furthest requested GP to leaf node and rcu_data structure. */
1431 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1432 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1433 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1434 }
1435 if (rnp != rnp_start)
1436 raw_spin_unlock_rcu_node(rnp);
1437 return ret;
1438 }
1439
1440 /*
1441 * Clean up any old requests for the just-ended grace period. Also return
1442 * whether any additional grace periods have been requested.
1443 */
1444 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1445 {
1446 bool needmore;
1447 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1448
1449 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1450 if (!needmore)
1451 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1452 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1453 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1454 return needmore;
1455 }
1456
1457 /*
1458 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1459 * interrupt or softirq handler, in which case we just might immediately
1460 * sleep upon return, resulting in a grace-period hang), and don't bother
1461 * awakening when there is nothing for the grace-period kthread to do
1462 * (as in several CPUs raced to awaken, we lost), and finally don't try
1463 * to awaken a kthread that has not yet been created. If all those checks
1464 * are passed, track some debug information and awaken.
1465 *
1466 * So why do the self-wakeup when in an interrupt or softirq handler
1467 * in the grace-period kthread's context? Because the kthread might have
1468 * been interrupted just as it was going to sleep, and just after the final
1469 * pre-sleep check of the awaken condition. In this case, a wakeup really
1470 * is required, and is therefore supplied.
1471 */
1472 static void rcu_gp_kthread_wake(void)
1473 {
1474 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1475
1476 if ((current == t && !in_irq() && !in_serving_softirq()) ||
1477 !READ_ONCE(rcu_state.gp_flags) || !t)
1478 return;
1479 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1480 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1481 swake_up_one(&rcu_state.gp_wq);
1482 }
1483
1484 /*
1485 * If there is room, assign a ->gp_seq number to any callbacks on this
1486 * CPU that have not already been assigned. Also accelerate any callbacks
1487 * that were previously assigned a ->gp_seq number that has since proven
1488 * to be too conservative, which can happen if callbacks get assigned a
1489 * ->gp_seq number while RCU is idle, but with reference to a non-root
1490 * rcu_node structure. This function is idempotent, so it does not hurt
1491 * to call it repeatedly. Returns a flag indicating whether we should awaken
1492 * the RCU grace-period kthread.
1493 *
1494 * The caller must hold rnp->lock with interrupts disabled.
1495 */
1496 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1497 {
1498 unsigned long gp_seq_req;
1499 bool ret = false;
1500
1501 rcu_lockdep_assert_cblist_protected(rdp);
1502 raw_lockdep_assert_held_rcu_node(rnp);
1503
1504 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1505 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1506 return false;
1507
1508 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1509
1510 /*
1511 * Callbacks are often registered with incomplete grace-period
1512 * information. Something about the fact that getting exact
1513 * information requires acquiring a global lock... RCU therefore
1514 * makes a conservative estimate of the grace period number at which
1515 * a given callback will become ready to invoke. The following
1516 * code checks this estimate and improves it when possible, thus
1517 * accelerating callback invocation to an earlier grace-period
1518 * number.
1519 */
1520 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1521 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1522 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1523
1524 /* Trace depending on how much we were able to accelerate. */
1525 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1526 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1527 else
1528 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1529
1530 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1531
1532 return ret;
1533 }
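/*
 * Worked example of the acceleration target above (a sketch, assuming the
 * two-bit ->gp_seq state encoding from kernel/rcu/rcu.h): if ->gp_seq is
 * 0x8 (grace period #2, idle), rcu_seq_snap() returns 0xc (grace period #3),
 * because an idle RCU needs only one more full grace period to elapse.
 * If ->gp_seq is instead 0x9 (grace period #2 in progress), rcu_seq_snap()
 * returns 0x10 (grace period #4), skipping the in-progress grace period,
 * which might not wait on the newly queued callbacks.
 */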
1534
1535 /*
1536 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1537 * rcu_node structure's ->lock be held. It consults the cached value
1538 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1539 * that a new grace-period request is needed, invokes rcu_accelerate_cbs()
1540 * while holding the leaf rcu_node structure's ->lock.
1541 */
1542 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1543 struct rcu_data *rdp)
1544 {
1545 unsigned long c;
1546 bool needwake;
1547
1548 rcu_lockdep_assert_cblist_protected(rdp);
1549 c = rcu_seq_snap(&rcu_state.gp_seq);
1550 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1551 /* Old request still live, so mark recent callbacks. */
1552 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1553 return;
1554 }
1555 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1556 needwake = rcu_accelerate_cbs(rnp, rdp);
1557 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1558 if (needwake)
1559 rcu_gp_kthread_wake();
1560 }
1561
1562 /*
1563 * Move any callbacks whose grace period has completed to the
1564 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1565 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1566 * sublist. This function is idempotent, so it does not hurt to
1567 * invoke it repeatedly. As long as it is not invoked -too- often...
1568 * Returns true if the RCU grace-period kthread needs to be awakened.
1569 *
1570 * The caller must hold rnp->lock with interrupts disabled.
1571 */
1572 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1573 {
1574 rcu_lockdep_assert_cblist_protected(rdp);
1575 raw_lockdep_assert_held_rcu_node(rnp);
1576
1577 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1578 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1579 return false;
1580
1581 /*
1582 * Find all callbacks whose ->gp_seq numbers indicate that they
1583 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1584 */
1585 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1586
1587 /* Classify any remaining callbacks. */
1588 return rcu_accelerate_cbs(rnp, rdp);
1589 }
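/*
 * Illustrative layout of the per-CPU segmented callback list manipulated
 * above (a sketch based on the RCU_*_TAIL segments in rcu_segcblist.h):
 *
 *   [RCU_DONE_TAIL]       callbacks whose grace period has ended; ready
 *                         for invocation by rcu_do_batch().
 *   [RCU_WAIT_TAIL]       callbacks waiting for the current grace period.
 *   [RCU_NEXT_READY_TAIL] callbacks waiting for the next grace period.
 *   [RCU_NEXT_TAIL]       callbacks not yet associated with a grace period.
 *
 * rcu_segcblist_advance() moves entries into RCU_DONE_TAIL as ->gp_seq
 * advances, and rcu_accelerate_cbs() assigns ->gp_seq numbers to the
 * trailing segments.
 */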
1590
1591 /*
1592 * Move and classify callbacks, but only if doing so won't require
1593 * that the RCU grace-period kthread be awakened.
1594 */
1595 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1596 struct rcu_data *rdp)
1597 {
1598 rcu_lockdep_assert_cblist_protected(rdp);
1599 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1600 return;
1601 // The grace period cannot end while we hold the rcu_node lock.
1602 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1603 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1604 raw_spin_unlock_rcu_node(rnp);
1605 }
1606
1607 /*
1608 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1609 * quiescent state. This is intended to be invoked when the CPU notices
1610 * a new grace period.
1611 */
1612 static void rcu_strict_gp_check_qs(void)
1613 {
1614 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1615 rcu_read_lock();
1616 rcu_read_unlock();
1617 }
1618 }
1619
1620 /*
1621 * Update CPU-local rcu_data state to record the beginnings and ends of
1622 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1623 * structure corresponding to the current CPU, and must have irqs disabled.
1624 * Returns true if the grace-period kthread needs to be awakened.
1625 */
1626 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1627 {
1628 bool ret = false;
1629 bool need_qs;
1630 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1631
1632 raw_lockdep_assert_held_rcu_node(rnp);
1633
1634 if (rdp->gp_seq == rnp->gp_seq)
1635 return false; /* Nothing to do. */
1636
1637 /* Handle the ends of any preceding grace periods first. */
1638 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1639 unlikely(READ_ONCE(rdp->gpwrap))) {
1640 if (!offloaded)
1641 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1642 rdp->core_needs_qs = false;
1643 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1644 } else {
1645 if (!offloaded)
1646 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1647 if (rdp->core_needs_qs)
1648 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1649 }
1650
1651 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1652 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1653 unlikely(READ_ONCE(rdp->gpwrap))) {
1654 /*
1655 * If the current grace period is waiting for this CPU,
1656 * set up to detect a quiescent state, otherwise don't
1657 * go looking for one.
1658 */
1659 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1660 need_qs = !!(rnp->qsmask & rdp->grpmask);
1661 rdp->cpu_no_qs.b.norm = need_qs;
1662 rdp->core_needs_qs = need_qs;
1663 zero_cpu_stall_ticks(rdp);
1664 }
1665 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1666 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1667 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1668 WRITE_ONCE(rdp->gpwrap, false);
1669 rcu_gpnum_ovf(rnp, rdp);
1670 return ret;
1671 }
1672
1673 static void note_gp_changes(struct rcu_data *rdp)
1674 {
1675 unsigned long flags;
1676 bool needwake;
1677 struct rcu_node *rnp;
1678
1679 local_irq_save(flags);
1680 rnp = rdp->mynode;
1681 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1682 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1683 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1684 local_irq_restore(flags);
1685 return;
1686 }
1687 needwake = __note_gp_changes(rnp, rdp);
1688 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1689 rcu_strict_gp_check_qs();
1690 if (needwake)
1691 rcu_gp_kthread_wake();
1692 }
1693
1694 static void rcu_gp_slow(int delay)
1695 {
1696 if (delay > 0 &&
1697 !(rcu_seq_ctr(rcu_state.gp_seq) %
1698 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1699 schedule_timeout_idle(delay);
1700 }
1701
1702 static unsigned long sleep_duration;
1703
1704 /* Allow rcutorture to stall the grace-period kthread. */
1705 void rcu_gp_set_torture_wait(int duration)
1706 {
1707 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1708 WRITE_ONCE(sleep_duration, duration);
1709 }
1710 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1711
1712 /* Actually implement the aforementioned wait. */
1713 static void rcu_gp_torture_wait(void)
1714 {
1715 unsigned long duration;
1716
1717 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1718 return;
1719 duration = xchg(&sleep_duration, 0UL);
1720 if (duration > 0) {
1721 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1722 schedule_timeout_idle(duration);
1723 pr_alert("%s: Wait complete\n", __func__);
1724 }
1725 }
1726
1727 /*
1728 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1729 * processing.
1730 */
1731 static void rcu_strict_gp_boundary(void *unused)
1732 {
1733 invoke_rcu_core();
1734 }
1735
1736 /*
1737 * Initialize a new grace period. Return false if no grace period required.
1738 */
1739 static noinline_for_stack bool rcu_gp_init(void)
1740 {
1741 unsigned long firstseq;
1742 unsigned long flags;
1743 unsigned long oldmask;
1744 unsigned long mask;
1745 struct rcu_data *rdp;
1746 struct rcu_node *rnp = rcu_get_root();
1747
1748 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1749 raw_spin_lock_irq_rcu_node(rnp);
1750 if (!READ_ONCE(rcu_state.gp_flags)) {
1751 /* Spurious wakeup, tell caller to go back to sleep. */
1752 raw_spin_unlock_irq_rcu_node(rnp);
1753 return false;
1754 }
1755 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1756
1757 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1758 /*
1759 * Grace period already in progress, don't start another.
1760 * Not supposed to be able to happen.
1761 */
1762 raw_spin_unlock_irq_rcu_node(rnp);
1763 return false;
1764 }
1765
1766 /* Advance to a new grace period and initialize state. */
1767 record_gp_stall_check_time();
1768 /* Record GP times before starting GP, hence rcu_seq_start(). */
1769 rcu_seq_start(&rcu_state.gp_seq);
1770 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1771 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1772 raw_spin_unlock_irq_rcu_node(rnp);
1773
1774 /*
1775 * Apply per-leaf buffered online and offline operations to
1776 * the rcu_node tree. Note that this new grace period need not
1777 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1778 * offlining path, when combined with checks in this function,
1779 * will handle CPUs that are currently going offline or that will
1780 * go offline later. Please also refer to "Hotplug CPU" section
1781 * of RCU's Requirements documentation.
1782 */
1783 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1784 rcu_for_each_leaf_node(rnp) {
1785 smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1786 firstseq = READ_ONCE(rnp->ofl_seq);
1787 if (firstseq & 0x1)
1788 while (firstseq == READ_ONCE(rnp->ofl_seq))
1789 schedule_timeout_idle(1); // Can't wake unless RCU is watching.
1790 smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1791 raw_spin_lock(&rcu_state.ofl_lock);
1792 raw_spin_lock_irq_rcu_node(rnp);
1793 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1794 !rnp->wait_blkd_tasks) {
1795 /* Nothing to do on this leaf rcu_node structure. */
1796 raw_spin_unlock_irq_rcu_node(rnp);
1797 raw_spin_unlock(&rcu_state.ofl_lock);
1798 continue;
1799 }
1800
1801 /* Record old state, apply changes to ->qsmaskinit field. */
1802 oldmask = rnp->qsmaskinit;
1803 rnp->qsmaskinit = rnp->qsmaskinitnext;
1804
1805 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1806 if (!oldmask != !rnp->qsmaskinit) {
1807 if (!oldmask) { /* First online CPU for rcu_node. */
1808 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1809 rcu_init_new_rnp(rnp);
1810 } else if (rcu_preempt_has_tasks(rnp)) {
1811 rnp->wait_blkd_tasks = true; /* blocked tasks */
1812 } else { /* Last offline CPU and can propagate. */
1813 rcu_cleanup_dead_rnp(rnp);
1814 }
1815 }
1816
1817 /*
1818 * If all waited-on tasks from prior grace period are
1819 * done, and if all this rcu_node structure's CPUs are
1820 * still offline, propagate up the rcu_node tree and
1821 * clear ->wait_blkd_tasks. Otherwise, if one of this
1822 * rcu_node structure's CPUs has since come back online,
1823 * simply clear ->wait_blkd_tasks.
1824 */
1825 if (rnp->wait_blkd_tasks &&
1826 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1827 rnp->wait_blkd_tasks = false;
1828 if (!rnp->qsmaskinit)
1829 rcu_cleanup_dead_rnp(rnp);
1830 }
1831
1832 raw_spin_unlock_irq_rcu_node(rnp);
1833 raw_spin_unlock(&rcu_state.ofl_lock);
1834 }
1835 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1836
1837 /*
1838 * Set the quiescent-state-needed bits in all the rcu_node
1839 * structures for all currently online CPUs in breadth-first
1840 * order, starting from the root rcu_node structure, relying on the
1841 * layout of the tree within the rcu_state.node[] array. Note that
1842 * other CPUs will access only the leaves of the hierarchy, thus
1843 * seeing that no grace period is in progress, at least until the
1844 * corresponding leaf node has been initialized.
1845 *
1846 * The grace period cannot complete until the initialization
1847 * process finishes, because this kthread handles both.
1848 */
1849 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1850 rcu_for_each_node_breadth_first(rnp) {
1851 rcu_gp_slow(gp_init_delay);
1852 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1853 rdp = this_cpu_ptr(&rcu_data);
1854 rcu_preempt_check_blocked_tasks(rnp);
1855 rnp->qsmask = rnp->qsmaskinit;
1856 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1857 if (rnp == rdp->mynode)
1858 (void)__note_gp_changes(rnp, rdp);
1859 rcu_preempt_boost_start_gp(rnp);
1860 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1861 rnp->level, rnp->grplo,
1862 rnp->grphi, rnp->qsmask);
1863 /* Quiescent states for tasks on any now-offline CPUs. */
1864 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1865 rnp->rcu_gp_init_mask = mask;
1866 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1867 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1868 else
1869 raw_spin_unlock_irq_rcu_node(rnp);
1870 cond_resched_tasks_rcu_qs();
1871 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1872 }
1873
1874 // If strict, make all CPUs aware of new grace period.
1875 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1876 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1877
1878 return true;
1879 }
1880
1881 /*
1882 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1883 * time.
1884 */
1885 static bool rcu_gp_fqs_check_wake(int *gfp)
1886 {
1887 struct rcu_node *rnp = rcu_get_root();
1888
1889 // If under overload conditions, force an immediate FQS scan.
1890 if (*gfp & RCU_GP_FLAG_OVLD)
1891 return true;
1892
1893 // Someone like call_rcu() requested a force-quiescent-state scan.
1894 *gfp = READ_ONCE(rcu_state.gp_flags);
1895 if (*gfp & RCU_GP_FLAG_FQS)
1896 return true;
1897
1898 // The current grace period has completed.
1899 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1900 return true;
1901
1902 return false;
1903 }
1904
1905 /*
1906 * Do one round of quiescent-state forcing.
1907 */
1908 static void rcu_gp_fqs(bool first_time)
1909 {
1910 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1911 struct rcu_node *rnp = rcu_get_root();
1912
1913 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1914 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1915
1916 WARN_ON_ONCE(nr_fqs > 3);
1917 /* Only countdown nr_fqs for stall purposes if jiffies moves. */
1918 if (nr_fqs) {
1919 if (nr_fqs == 1) {
1920 WRITE_ONCE(rcu_state.jiffies_stall,
1921 jiffies + rcu_jiffies_till_stall_check());
1922 }
1923 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1924 }
1925
1926 if (first_time) {
1927 /* Collect dyntick-idle snapshots. */
1928 force_qs_rnp(dyntick_save_progress_counter);
1929 } else {
1930 /* Handle dyntick-idle and offline CPUs. */
1931 force_qs_rnp(rcu_implicit_dynticks_qs);
1932 }
1933 /* Clear flag to prevent immediate re-entry. */
1934 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1935 raw_spin_lock_irq_rcu_node(rnp);
1936 WRITE_ONCE(rcu_state.gp_flags,
1937 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1938 raw_spin_unlock_irq_rcu_node(rnp);
1939 }
1940 }
1941
1942 /*
1943 * Loop doing repeated quiescent-state forcing until the grace period ends.
1944 */
1945 static noinline_for_stack void rcu_gp_fqs_loop(void)
1946 {
1947 bool first_gp_fqs;
1948 int gf = 0;
1949 unsigned long j;
1950 int ret;
1951 struct rcu_node *rnp = rcu_get_root();
1952
1953 first_gp_fqs = true;
1954 j = READ_ONCE(jiffies_till_first_fqs);
1955 if (rcu_state.cbovld)
1956 gf = RCU_GP_FLAG_OVLD;
1957 ret = 0;
1958 for (;;) {
1959 if (!ret) {
1960 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1961 /*
1962 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1963 * update; required for stall checks.
1964 */
1965 smp_wmb();
1966 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1967 jiffies + (j ? 3 * j : 2));
1968 }
1969 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1970 TPS("fqswait"));
1971 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1972 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1973 rcu_gp_fqs_check_wake(&gf), j);
1974 rcu_gp_torture_wait();
1975 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1976 /* Locking provides needed memory barriers. */
1977 /* If grace period done, leave loop. */
1978 if (!READ_ONCE(rnp->qsmask) &&
1979 !rcu_preempt_blocked_readers_cgp(rnp))
1980 break;
1981 /* If time for quiescent-state forcing, do it. */
1982 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1983 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1984 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1985 TPS("fqsstart"));
1986 rcu_gp_fqs(first_gp_fqs);
1987 gf = 0;
1988 if (first_gp_fqs) {
1989 first_gp_fqs = false;
1990 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1991 }
1992 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1993 TPS("fqsend"));
1994 cond_resched_tasks_rcu_qs();
1995 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1996 ret = 0; /* Force full wait till next FQS. */
1997 j = READ_ONCE(jiffies_till_next_fqs);
1998 } else {
1999 /* Deal with stray signal. */
2000 cond_resched_tasks_rcu_qs();
2001 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2002 WARN_ON(signal_pending(current));
2003 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2004 TPS("fqswaitsig"));
2005 ret = 1; /* Keep old FQS timing. */
2006 j = jiffies;
2007 if (time_after(jiffies, rcu_state.jiffies_force_qs))
2008 j = 1;
2009 else
2010 j = rcu_state.jiffies_force_qs - j;
2011 gf = 0;
2012 }
2013 }
2014 }
2015
2016 /*
2017 * Clean up after the old grace period.
2018 */
2019 static noinline void rcu_gp_cleanup(void)
2020 {
2021 int cpu;
2022 bool needgp = false;
2023 unsigned long gp_duration;
2024 unsigned long new_gp_seq;
2025 bool offloaded;
2026 struct rcu_data *rdp;
2027 struct rcu_node *rnp = rcu_get_root();
2028 struct swait_queue_head *sq;
2029
2030 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2031 raw_spin_lock_irq_rcu_node(rnp);
2032 rcu_state.gp_end = jiffies;
2033 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2034 if (gp_duration > rcu_state.gp_max)
2035 rcu_state.gp_max = gp_duration;
2036
2037 /*
2038 * We know the grace period is complete, but to everyone else
2039 * it appears to still be ongoing. But it is also the case
2040 * that to everyone else it looks like there is nothing that
2041 * they can do to advance the grace period. It is therefore
2042 * safe for us to drop the lock in order to mark the grace
2043 * period as completed in all of the rcu_node structures.
2044 */
2045 raw_spin_unlock_irq_rcu_node(rnp);
2046
2047 /*
2048 * Propagate new ->gp_seq value to rcu_node structures so that
2049 * other CPUs don't have to wait until the start of the next grace
2050 * period to process their callbacks. This also avoids some nasty
2051 * RCU grace-period initialization races by forcing the end of
2052 * the current grace period to be completely recorded in all of
2053 * the rcu_node structures before the beginning of the next grace
2054 * period is recorded in any of the rcu_node structures.
2055 */
2056 new_gp_seq = rcu_state.gp_seq;
2057 rcu_seq_end(&new_gp_seq);
2058 rcu_for_each_node_breadth_first(rnp) {
2059 raw_spin_lock_irq_rcu_node(rnp);
2060 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2061 dump_blkd_tasks(rnp, 10);
2062 WARN_ON_ONCE(rnp->qsmask);
2063 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2064 rdp = this_cpu_ptr(&rcu_data);
2065 if (rnp == rdp->mynode)
2066 needgp = __note_gp_changes(rnp, rdp) || needgp;
2067 /* smp_mb() provided by prior unlock-lock pair. */
2068 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2069 // Reset overload indication for CPUs no longer overloaded
2070 if (rcu_is_leaf_node(rnp))
2071 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2072 rdp = per_cpu_ptr(&rcu_data, cpu);
2073 check_cb_ovld_locked(rdp, rnp);
2074 }
2075 sq = rcu_nocb_gp_get(rnp);
2076 raw_spin_unlock_irq_rcu_node(rnp);
2077 rcu_nocb_gp_cleanup(sq);
2078 cond_resched_tasks_rcu_qs();
2079 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2080 rcu_gp_slow(gp_cleanup_delay);
2081 }
2082 rnp = rcu_get_root();
2083 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2084
2085 /* Declare grace period done, trace first to use old GP number. */
2086 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2087 rcu_seq_end(&rcu_state.gp_seq);
2088 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2089 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2090 /* Check for GP requests since above loop. */
2091 rdp = this_cpu_ptr(&rcu_data);
2092 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2093 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2094 TPS("CleanupMore"));
2095 needgp = true;
2096 }
2097 /* Advance CBs to reduce false positives below. */
2098 offloaded = rcu_rdp_is_offloaded(rdp);
2099 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2100 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2101 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2102 trace_rcu_grace_period(rcu_state.name,
2103 rcu_state.gp_seq,
2104 TPS("newreq"));
2105 } else {
2106 WRITE_ONCE(rcu_state.gp_flags,
2107 rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2108 }
2109 raw_spin_unlock_irq_rcu_node(rnp);
2110
2111 // If strict, make all CPUs aware of the end of the old grace period.
2112 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2113 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2114 }
2115
2116 /*
2117 * Body of kthread that handles grace periods.
2118 */
2119 static int __noreturn rcu_gp_kthread(void *unused)
2120 {
2121 rcu_bind_gp_kthread();
2122 for (;;) {
2123
2124 /* Handle grace-period start. */
2125 for (;;) {
2126 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2127 TPS("reqwait"));
2128 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2129 swait_event_idle_exclusive(rcu_state.gp_wq,
2130 READ_ONCE(rcu_state.gp_flags) &
2131 RCU_GP_FLAG_INIT);
2132 rcu_gp_torture_wait();
2133 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2134 /* Locking provides needed memory barrier. */
2135 if (rcu_gp_init())
2136 break;
2137 cond_resched_tasks_rcu_qs();
2138 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2139 WARN_ON(signal_pending(current));
2140 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2141 TPS("reqwaitsig"));
2142 }
2143
2144 /* Handle quiescent-state forcing. */
2145 rcu_gp_fqs_loop();
2146
2147 /* Handle grace-period end. */
2148 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2149 rcu_gp_cleanup();
2150 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2151 }
2152 }
2153
2154 /*
2155 * Report a full set of quiescent states to the rcu_state data structure.
2156 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2157 * another grace period is required. Whether we wake the grace-period
2158 * kthread or it awakens itself for the next round of quiescent-state
2159 * forcing, that kthread will clean up after the just-completed grace
2160 * period. Note that the caller must hold rnp->lock, which is released
2161 * before return.
2162 */
2163 static void rcu_report_qs_rsp(unsigned long flags)
2164 __releases(rcu_get_root()->lock)
2165 {
2166 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2167 WARN_ON_ONCE(!rcu_gp_in_progress());
2168 WRITE_ONCE(rcu_state.gp_flags,
2169 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2170 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2171 rcu_gp_kthread_wake();
2172 }
2173
2174 /*
2175 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2176 * Allows quiescent states for a group of CPUs to be reported at one go
2177 * to the specified rcu_node structure, though all the CPUs in the group
2178 * must be represented by the same rcu_node structure (which need not be a
2179 * leaf rcu_node structure, though it often will be). The gps parameter
2180 * is the grace-period snapshot, which means that the quiescent states
2181 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2182 * must be held upon entry, and it is released before return.
2183 *
2184 * As a special case, if mask is zero, the bit-already-cleared check is
2185 * disabled. This allows propagating quiescent state due to resumed tasks
2186 * during grace-period initialization.
2187 */
2188 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2189 unsigned long gps, unsigned long flags)
2190 __releases(rnp->lock)
2191 {
2192 unsigned long oldmask = 0;
2193 struct rcu_node *rnp_c;
2194
2195 raw_lockdep_assert_held_rcu_node(rnp);
2196
2197 /* Walk up the rcu_node hierarchy. */
2198 for (;;) {
2199 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2200
2201 /*
2202 * Our bit has already been cleared, or the
2203 * relevant grace period is already over, so done.
2204 */
2205 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2206 return;
2207 }
2208 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2209 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2210 rcu_preempt_blocked_readers_cgp(rnp));
2211 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2212 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2213 mask, rnp->qsmask, rnp->level,
2214 rnp->grplo, rnp->grphi,
2215 !!rnp->gp_tasks);
2216 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2217
2218 /* Other bits still set at this level, so done. */
2219 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2220 return;
2221 }
2222 rnp->completedqs = rnp->gp_seq;
2223 mask = rnp->grpmask;
2224 if (rnp->parent == NULL) {
2225
2226 /* No more levels. Exit loop holding root lock. */
2227
2228 break;
2229 }
2230 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2231 rnp_c = rnp;
2232 rnp = rnp->parent;
2233 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2234 oldmask = READ_ONCE(rnp_c->qsmask);
2235 }
2236
2237 /*
2238 * Get here if we are the last CPU to pass through a quiescent
2239 * state for this grace period. Invoke rcu_report_qs_rsp()
2240 * to clean up and start the next grace period if one is needed.
2241 */
2242 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2243 }
2244
2245 /*
2246 * Record a quiescent state for all tasks that were previously queued
2247 * on the specified rcu_node structure and that were blocking the current
2248 * RCU grace period. The caller must hold the corresponding rnp->lock with
2249 * irqs disabled, and this lock is released upon return, but irqs remain
2250 * disabled.
2251 */
2252 static void __maybe_unused
2253 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2254 __releases(rnp->lock)
2255 {
2256 unsigned long gps;
2257 unsigned long mask;
2258 struct rcu_node *rnp_p;
2259
2260 raw_lockdep_assert_held_rcu_node(rnp);
2261 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2262 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2263 rnp->qsmask != 0) {
2264 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2265 return; /* Still need more quiescent states! */
2266 }
2267
2268 rnp->completedqs = rnp->gp_seq;
2269 rnp_p = rnp->parent;
2270 if (rnp_p == NULL) {
2271 /*
2272 * Only one rcu_node structure in the tree, so don't
2273 * try to report up to its nonexistent parent!
2274 */
2275 rcu_report_qs_rsp(flags);
2276 return;
2277 }
2278
2279 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2280 gps = rnp->gp_seq;
2281 mask = rnp->grpmask;
2282 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2283 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2284 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2285 }
2286
2287 /*
2288 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2289 * structure. This must be called from the specified CPU.
2290 */
2291 static void
2292 rcu_report_qs_rdp(struct rcu_data *rdp)
2293 {
2294 unsigned long flags;
2295 unsigned long mask;
2296 bool needwake = false;
2297 const bool offloaded = rcu_rdp_is_offloaded(rdp);
2298 struct rcu_node *rnp;
2299
2300 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2301 rnp = rdp->mynode;
2302 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2303 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2304 rdp->gpwrap) {
2305
2306 /*
2307 * The grace period in which this quiescent state was
2308 * recorded has ended, so don't report it upwards.
2309 * We will instead need a new quiescent state that lies
2310 * within the current grace period.
2311 */
2312 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2313 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2314 return;
2315 }
2316 mask = rdp->grpmask;
2317 rdp->core_needs_qs = false;
2318 if ((rnp->qsmask & mask) == 0) {
2319 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2320 } else {
2321 /*
2322 * This GP can't end until cpu checks in, so all of our
2323 * callbacks can be processed during the next GP.
2324 */
2325 if (!offloaded)
2326 needwake = rcu_accelerate_cbs(rnp, rdp);
2327
2328 rcu_disable_urgency_upon_qs(rdp);
2329 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2330 /* ^^^ Released rnp->lock */
2331 if (needwake)
2332 rcu_gp_kthread_wake();
2333 }
2334 }
2335
2336 /*
2337 * Check to see if there is a new grace period of which this CPU
2338 * is not yet aware, and if so, set up local rcu_data state for it.
2339 * Otherwise, see if this CPU has just passed through its first
2340 * quiescent state for this grace period, and record that fact if so.
2341 */
2342 static void
2343 rcu_check_quiescent_state(struct rcu_data *rdp)
2344 {
2345 /* Check for grace-period ends and beginnings. */
2346 note_gp_changes(rdp);
2347
2348 /*
2349 * Does this CPU still need to do its part for current grace period?
2350 * If no, return and let the other CPUs do their part as well.
2351 */
2352 if (!rdp->core_needs_qs)
2353 return;
2354
2355 /*
2356 * Was there a quiescent state since the beginning of the grace
2357 * period? If no, then exit and wait for the next call.
2358 */
2359 if (rdp->cpu_no_qs.b.norm)
2360 return;
2361
2362 /*
2363 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2364 * judge of that).
2365 */
2366 rcu_report_qs_rdp(rdp);
2367 }
2368
2369 /*
2370 * Near the end of the offline process. Trace the fact that this CPU
2371 * is going offline.
2372 */
2373 int rcutree_dying_cpu(unsigned int cpu)
2374 {
2375 bool blkd;
2376 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2377 struct rcu_node *rnp = rdp->mynode;
2378
2379 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2380 return 0;
2381
2382 blkd = !!(rnp->qsmask & rdp->grpmask);
2383 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2384 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2385 return 0;
2386 }
2387
2388 /*
2389 * All CPUs for the specified rcu_node structure have gone offline,
2390 * and all tasks that were preempted within an RCU read-side critical
2391 * section while running on one of those CPUs have since exited their RCU
2392 * read-side critical section. Some other CPU is reporting this fact with
2393 * the specified rcu_node structure's ->lock held and interrupts disabled.
2394 * This function therefore goes up the tree of rcu_node structures,
2395 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2396 * the leaf rcu_node structure's ->qsmaskinit field has already been
2397 * updated.
2398 *
2399 * This function does check that the specified rcu_node structure has
2400 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2401 * prematurely. That said, invoking it after the fact will cost you
2402 * a needless lock acquisition. So once it has done its work, don't
2403 * invoke it again.
2404 */
2405 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2406 {
2407 long mask;
2408 struct rcu_node *rnp = rnp_leaf;
2409
2410 raw_lockdep_assert_held_rcu_node(rnp_leaf);
2411 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2412 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2413 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2414 return;
2415 for (;;) {
2416 mask = rnp->grpmask;
2417 rnp = rnp->parent;
2418 if (!rnp)
2419 break;
2420 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2421 rnp->qsmaskinit &= ~mask;
2422 /* Between grace periods, so better already be zero! */
2423 WARN_ON_ONCE(rnp->qsmask);
2424 if (rnp->qsmaskinit) {
2425 raw_spin_unlock_rcu_node(rnp);
2426 /* irqs remain disabled. */
2427 return;
2428 }
2429 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2430 }
2431 }
2432
2433 /*
2434 * The CPU has been completely removed, and some other CPU is reporting
2435 * this fact from process context. Do the remainder of the cleanup.
2436 * There can only be one CPU hotplug operation at a time, so no need for
2437 * explicit locking.
2438 */
2439 int rcutree_dead_cpu(unsigned int cpu)
2440 {
2441 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2442 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2443
2444 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2445 return 0;
2446
2447 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2448 /* Adjust any no-longer-needed kthreads. */
2449 rcu_boost_kthread_setaffinity(rnp, -1);
2450 // Stop-machine done, so allow nohz_full to disable tick.
2451 tick_dep_clear(TICK_DEP_BIT_RCU);
2452 return 0;
2453 }
2454
2455 /*
2456 * Invoke any RCU callbacks that have made it to the end of their grace
2457 * period. Throttle as specified by rdp->blimit.
2458 */
2459 static void rcu_do_batch(struct rcu_data *rdp)
2460 {
2461 int div;
2462 bool __maybe_unused empty;
2463 unsigned long flags;
2464 const bool offloaded = rcu_rdp_is_offloaded(rdp);
2465 struct rcu_head *rhp;
2466 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2467 long bl, count = 0;
2468 long pending, tlimit = 0;
2469
2470 /* If no callbacks are ready, just return. */
2471 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2472 trace_rcu_batch_start(rcu_state.name,
2473 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2474 trace_rcu_batch_end(rcu_state.name, 0,
2475 !rcu_segcblist_empty(&rdp->cblist),
2476 need_resched(), is_idle_task(current),
2477 rcu_is_callbacks_kthread());
2478 return;
2479 }
2480
2481 /*
2482 * Extract the list of ready callbacks, disabling to prevent
2483 * races with call_rcu() from interrupt handlers. Leave the
2484 * callback counts, as rcu_barrier() needs to be conservative.
2485 */
2486 local_irq_save(flags);
2487 rcu_nocb_lock(rdp);
2488 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2489 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2490 div = READ_ONCE(rcu_divisor);
2491 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2492 bl = max(rdp->blimit, pending >> div);
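/*
 * Worked example of the batch limit above (a sketch, not tied to any
 * particular tuning): with div == 7 and pending == 20000 queued callbacks,
 * pending >> div is 156, so bl becomes max(rdp->blimit, 156) and this
 * batch may invoke up to 156 callbacks even if ->blimit itself is small.
 */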
2493 if (in_serving_softirq() && unlikely(bl > 100)) {
2494 long rrn = READ_ONCE(rcu_resched_ns);
2495
2496 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2497 tlimit = local_clock() + rrn;
2498 }
2499 trace_rcu_batch_start(rcu_state.name,
2500 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2501 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2502 if (offloaded)
2503 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2504
2505 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2506 rcu_nocb_unlock_irqrestore(rdp, flags);
2507
2508 /* Invoke callbacks. */
2509 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2510 rhp = rcu_cblist_dequeue(&rcl);
2511
2512 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2513 rcu_callback_t f;
2514
2515 count++;
2516 debug_rcu_head_unqueue(rhp);
2517
2518 rcu_lock_acquire(&rcu_callback_map);
2519 trace_rcu_invoke_callback(rcu_state.name, rhp);
2520
2521 f = rhp->func;
2522 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2523 f(rhp);
2524
2525 rcu_lock_release(&rcu_callback_map);
2526
2527 /*
2528 * Stop only if limit reached and CPU has something to do.
2529 */
2530 if (in_serving_softirq()) {
2531 if (count >= bl && (need_resched() ||
2532 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2533 break;
2534
2535 /*
2536 * Make sure we don't spend too much time here and deprive other
2537 * softirq vectors of CPU cycles.
2538 */
2539 if (unlikely(tlimit)) {
2540 /* only call local_clock() every 32 callbacks */
2541 if (likely((count & 31) || local_clock() < tlimit))
2542 continue;
2543 /* Exceeded the time limit, so leave. */
2544 break;
2545 }
2546 } else {
2547 local_bh_enable();
2548 lockdep_assert_irqs_enabled();
2549 cond_resched_tasks_rcu_qs();
2550 lockdep_assert_irqs_enabled();
2551 local_bh_disable();
2552 }
2553 }
2554
2555 local_irq_save(flags);
2556 rcu_nocb_lock(rdp);
2557 rdp->n_cbs_invoked += count;
2558 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2559 is_idle_task(current), rcu_is_callbacks_kthread());
2560
2561 /* Update counts and requeue any remaining callbacks. */
2562 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2563 rcu_segcblist_add_len(&rdp->cblist, -count);
2564
2565 /* Reinstate batch limit if we have worked down the excess. */
2566 count = rcu_segcblist_n_cbs(&rdp->cblist);
2567 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2568 rdp->blimit = blimit;
2569
2570 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2571 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2572 rdp->qlen_last_fqs_check = 0;
2573 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2574 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2575 rdp->qlen_last_fqs_check = count;
2576
2577 /*
2578 * The following usually indicates a double call_rcu(). To track
2579 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2580 */
2581 empty = rcu_segcblist_empty(&rdp->cblist);
2582 WARN_ON_ONCE(count == 0 && !empty);
2583 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2584 count != 0 && empty);
2585 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2586 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2587
2588 rcu_nocb_unlock_irqrestore(rdp, flags);
2589
2590 /* Re-invoke RCU core processing if there are callbacks remaining. */
2591 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2592 invoke_rcu_core();
2593 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2594 }
2595
2596 /*
2597 * This function is invoked from each scheduling-clock interrupt,
2598 * and checks to see if this CPU is in a non-context-switch quiescent
2599 * state, for example, user mode or idle loop. It also schedules RCU
2600 * core processing. If the current grace period has gone on too long,
2601 * it will ask the scheduler to manufacture a context switch for the sole
2602 * purpose of providing the needed quiescent state.
2603 */
2604 void rcu_sched_clock_irq(int user)
2605 {
2606 trace_rcu_utilization(TPS("Start scheduler-tick"));
2607 lockdep_assert_irqs_disabled();
2608 raw_cpu_inc(rcu_data.ticks_this_gp);
2609 /* The load-acquire pairs with the store-release setting to true. */
2610 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2611 /* Idle and userspace execution already are quiescent states. */
2612 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2613 set_tsk_need_resched(current);
2614 set_preempt_need_resched();
2615 }
2616 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2617 }
2618 rcu_flavor_sched_clock_irq(user);
2619 if (rcu_pending(user))
2620 invoke_rcu_core();
2621 lockdep_assert_irqs_disabled();
2622
2623 trace_rcu_utilization(TPS("End scheduler-tick"));
2624 }
2625
2626 /*
2627 * Scan the leaf rcu_node structures. For each structure on which all
2628 * CPUs have reported a quiescent state and on which there are tasks
2629 * blocking the current grace period, initiate RCU priority boosting.
2630 * Otherwise, invoke the specified function to check dyntick state for
2631 * each CPU that has not yet reported a quiescent state.
2632 */
2633 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2634 {
2635 int cpu;
2636 unsigned long flags;
2637 unsigned long mask;
2638 struct rcu_data *rdp;
2639 struct rcu_node *rnp;
2640
2641 rcu_state.cbovld = rcu_state.cbovldnext;
2642 rcu_state.cbovldnext = false;
2643 rcu_for_each_leaf_node(rnp) {
2644 cond_resched_tasks_rcu_qs();
2645 mask = 0;
2646 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2647 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2648 if (rnp->qsmask == 0) {
2649 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2650 /*
2651 * No point in scanning bits because they
2652 * are all zero. But we might need to
2653 * priority-boost blocked readers.
2654 */
2655 rcu_initiate_boost(rnp, flags);
2656 /* rcu_initiate_boost() releases rnp->lock */
2657 continue;
2658 }
2659 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2660 continue;
2661 }
2662 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2663 rdp = per_cpu_ptr(&rcu_data, cpu);
2664 if (f(rdp)) {
2665 mask |= rdp->grpmask;
2666 rcu_disable_urgency_upon_qs(rdp);
2667 }
2668 }
2669 if (mask != 0) {
2670 /* Idle/offline CPUs, report (releases rnp->lock). */
2671 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2672 } else {
2673 /* Nothing to do here, so just drop the lock. */
2674 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2675 }
2676 }
2677 }
2678
2679 /*
2680 * Force quiescent states on reluctant CPUs, and also detect which
2681 * CPUs are in dyntick-idle mode.
2682 */
2683 void rcu_force_quiescent_state(void)
2684 {
2685 unsigned long flags;
2686 bool ret;
2687 struct rcu_node *rnp;
2688 struct rcu_node *rnp_old = NULL;
2689
2690 /* Funnel through hierarchy to reduce memory contention. */
2691 rnp = raw_cpu_read(rcu_data.mynode);
2692 for (; rnp != NULL; rnp = rnp->parent) {
2693 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2694 !raw_spin_trylock(&rnp->fqslock);
2695 if (rnp_old != NULL)
2696 raw_spin_unlock(&rnp_old->fqslock);
2697 if (ret)
2698 return;
2699 rnp_old = rnp;
2700 }
2701 /* rnp_old == rcu_get_root(), rnp == NULL. */
2702
2703 /* Reached the root of the rcu_node tree, acquire lock. */
2704 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2705 raw_spin_unlock(&rnp_old->fqslock);
2706 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2707 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2708 return; /* Someone beat us to it. */
2709 }
2710 WRITE_ONCE(rcu_state.gp_flags,
2711 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2712 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2713 rcu_gp_kthread_wake();
2714 }
2715 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2716
2717 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2718 // grace periods.
2719 static void strict_work_handler(struct work_struct *work)
2720 {
2721 rcu_read_lock();
2722 rcu_read_unlock();
2723 }
2724
2725 /* Perform RCU core processing work for the current CPU. */
2726 static __latent_entropy void rcu_core(void)
2727 {
2728 unsigned long flags;
2729 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2730 struct rcu_node *rnp = rdp->mynode;
2731 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2732
2733 if (cpu_is_offline(smp_processor_id()))
2734 return;
2735 trace_rcu_utilization(TPS("Start RCU core"));
2736 WARN_ON_ONCE(!rdp->beenonline);
2737
2738 /* Report any deferred quiescent states if preemption enabled. */
2739 if (!(preempt_count() & PREEMPT_MASK)) {
2740 rcu_preempt_deferred_qs(current);
2741 } else if (rcu_preempt_need_deferred_qs(current)) {
2742 set_tsk_need_resched(current);
2743 set_preempt_need_resched();
2744 }
2745
2746 /* Update RCU state based on any recent quiescent states. */
2747 rcu_check_quiescent_state(rdp);
2748
2749 /* No grace period and unregistered callbacks? */
2750 if (!rcu_gp_in_progress() &&
2751 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2752 rcu_nocb_lock_irqsave(rdp, flags);
2753 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2754 rcu_accelerate_cbs_unlocked(rnp, rdp);
2755 rcu_nocb_unlock_irqrestore(rdp, flags);
2756 }
2757
2758 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2759
2760 /* If there are callbacks ready, invoke them. */
2761 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2762 likely(READ_ONCE(rcu_scheduler_fully_active)))
2763 rcu_do_batch(rdp);
2764
2765 /* Do any needed deferred wakeups of rcuo kthreads. */
2766 do_nocb_deferred_wakeup(rdp);
2767 trace_rcu_utilization(TPS("End RCU core"));
2768
2769 // If strict GPs, schedule an RCU reader in a clean environment.
2770 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2771 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2772 }
2773
2774 static void rcu_core_si(struct softirq_action *h)
2775 {
2776 rcu_core();
2777 }
2778
2779 static void rcu_wake_cond(struct task_struct *t, int status)
2780 {
2781 /*
2782 * If the thread is yielding, only wake it when this
2783 * is invoked from idle
2784 */
2785 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2786 wake_up_process(t);
2787 }
2788
2789 static void invoke_rcu_core_kthread(void)
2790 {
2791 struct task_struct *t;
2792 unsigned long flags;
2793
2794 local_irq_save(flags);
2795 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2796 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2797 if (t != NULL && t != current)
2798 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2799 local_irq_restore(flags);
2800 }
2801
2802 /*
2803 * Wake up this CPU's rcuc kthread to do RCU core processing.
2804 */
2805 static void invoke_rcu_core(void)
2806 {
2807 if (!cpu_online(smp_processor_id()))
2808 return;
2809 if (use_softirq)
2810 raise_softirq(RCU_SOFTIRQ);
2811 else
2812 invoke_rcu_core_kthread();
2813 }
2814
2815 static void rcu_cpu_kthread_park(unsigned int cpu)
2816 {
2817 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2818 }
2819
2820 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2821 {
2822 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2823 }
2824
2825 /*
2826 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2827 * the RCU softirq used in configurations of RCU that do not support RCU
2828 * priority boosting.
2829 */
2830 static void rcu_cpu_kthread(unsigned int cpu)
2831 {
2832 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2833 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2834 int spincnt;
2835
2836 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2837 for (spincnt = 0; spincnt < 10; spincnt++) {
2838 local_bh_disable();
2839 *statusp = RCU_KTHREAD_RUNNING;
2840 local_irq_disable();
2841 work = *workp;
2842 *workp = 0;
2843 local_irq_enable();
2844 if (work)
2845 rcu_core();
2846 local_bh_enable();
2847 if (*workp == 0) {
2848 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2849 *statusp = RCU_KTHREAD_WAITING;
2850 return;
2851 }
2852 }
2853 *statusp = RCU_KTHREAD_YIELDING;
2854 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2855 schedule_timeout_idle(2);
2856 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2857 *statusp = RCU_KTHREAD_WAITING;
2858 }
2859
2860 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2861 .store = &rcu_data.rcu_cpu_kthread_task,
2862 .thread_should_run = rcu_cpu_kthread_should_run,
2863 .thread_fn = rcu_cpu_kthread,
2864 .thread_comm = "rcuc/%u",
2865 .setup = rcu_cpu_kthread_setup,
2866 .park = rcu_cpu_kthread_park,
2867 };
2868
2869 /*
2870 * Spawn per-CPU RCU core processing kthreads.
2871 */
2872 static int __init rcu_spawn_core_kthreads(void)
2873 {
2874 int cpu;
2875
2876 for_each_possible_cpu(cpu)
2877 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2878 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2879 return 0;
2880 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2881 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2882 return 0;
2883 }
2884
2885 /*
2886 * Handle any core-RCU processing required by a call_rcu() invocation.
2887 */
2888 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2889 unsigned long flags)
2890 {
2891 /*
2892 * If called from an extended quiescent state, invoke the RCU
2893 * core in order to force a re-evaluation of RCU's idleness.
2894 */
2895 if (!rcu_is_watching())
2896 invoke_rcu_core();
2897
2898 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2899 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2900 return;
2901
2902 /*
2903 * Force the grace period if too many callbacks or too long waiting.
2904 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2905 * if some other CPU has recently done so. Also, don't bother
2906 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2907 * is the only one waiting for a grace period to complete.
2908 */
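/*
 * For example, assuming the default qhimark of 10000 (module parameter
 * "rcutree.qhimark"), the check below fires only after roughly 10000
 * callbacks have been queued on this CPU since ->qlen_last_fqs_check
 * was last reset.
 */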
2909 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2910 rdp->qlen_last_fqs_check + qhimark)) {
2911
2912 /* Are we ignoring a completed grace period? */
2913 note_gp_changes(rdp);
2914
2915 /* Start a new grace period if one not already started. */
2916 if (!rcu_gp_in_progress()) {
2917 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2918 } else {
2919 /* Give the grace period a kick. */
2920 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2921 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2922 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2923 rcu_force_quiescent_state();
2924 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2925 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2926 }
2927 }
2928 }
2929
2930 /*
2931 * RCU callback function to leak a callback.
2932 */
2933 static void rcu_leak_callback(struct rcu_head *rhp)
2934 {
2935 }
2936
2937 /*
2938 * Check and if necessary update the leaf rcu_node structure's
2939 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2940 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2941 * structure's ->lock.
2942 */
2943 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2944 {
2945 raw_lockdep_assert_held_rcu_node(rnp);
2946 if (qovld_calc <= 0)
2947 return; // Early boot and wildcard value set.
2948 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2949 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2950 else
2951 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2952 }
2953
2954 /*
2955 * Check and if necessary update the leaf rcu_node structure's
2956 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2957 * number of queued RCU callbacks. No locks need be held, but the
2958 * caller must have disabled interrupts.
2959 *
2960 * Note that this function ignores the possibility that there are a lot
2961 * of callbacks all of which have already seen the end of their respective
2962 * grace periods. This omission is due to the need for no-CBs CPUs to
2963 * be holding ->nocb_lock to do this check, which is too heavy for a
2964 * common-case operation.
2965 */
2966 static void check_cb_ovld(struct rcu_data *rdp)
2967 {
2968 struct rcu_node *const rnp = rdp->mynode;
2969
2970 if (qovld_calc <= 0 ||
2971 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2972 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2973 return; // Early boot wildcard value or already set correctly.
2974 raw_spin_lock_rcu_node(rnp);
2975 check_cb_ovld_locked(rdp, rnp);
2976 raw_spin_unlock_rcu_node(rnp);
2977 }
2978
2979 /* Helper function for call_rcu() and friends. */
2980 static void
2981 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2982 {
2983 static atomic_t doublefrees;
2984 unsigned long flags;
2985 struct rcu_data *rdp;
2986 bool was_alldone;
2987
2988 /* Misaligned rcu_head! */
2989 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2990
2991 if (debug_rcu_head_queue(head)) {
2992 /*
2993 * Probable double call_rcu(), so leak the callback.
2994 * Use rcu:rcu_callback trace event to find the previous
2995 * time callback was passed to __call_rcu().
2996 */
2997 if (atomic_inc_return(&doublefrees) < 4) {
2998 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
2999 mem_dump_obj(head);
3000 }
3001 WRITE_ONCE(head->func, rcu_leak_callback);
3002 return;
3003 }
3004 head->func = func;
3005 head->next = NULL;
3006 local_irq_save(flags);
3007 kasan_record_aux_stack_noalloc(head);
3008 rdp = this_cpu_ptr(&rcu_data);
3009
3010 /* Add the callback to our list. */
3011 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3012 // This can trigger due to call_rcu() from offline CPU:
3013 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3014 WARN_ON_ONCE(!rcu_is_watching());
3015 // Very early boot, before rcu_init(). Initialize if needed
3016 // and then drop through to queue the callback.
3017 if (rcu_segcblist_empty(&rdp->cblist))
3018 rcu_segcblist_init(&rdp->cblist);
3019 }
3020
3021 check_cb_ovld(rdp);
3022 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
3023 return; // Enqueued onto ->nocb_bypass, so just leave.
3024 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
3025 rcu_segcblist_enqueue(&rdp->cblist, head);
3026 if (__is_kvfree_rcu_offset((unsigned long)func))
3027 trace_rcu_kvfree_callback(rcu_state.name, head,
3028 (unsigned long)func,
3029 rcu_segcblist_n_cbs(&rdp->cblist));
3030 else
3031 trace_rcu_callback(rcu_state.name, head,
3032 rcu_segcblist_n_cbs(&rdp->cblist));
3033
3034 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
3035
3036 /* Go handle any RCU core processing required. */
3037 if (unlikely(rcu_rdp_is_offloaded(rdp))) {
3038 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3039 } else {
3040 __call_rcu_core(rdp, head, flags);
3041 local_irq_restore(flags);
3042 }
3043 }
3044
3045 /**
3046 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3047 * @head: structure to be used for queueing the RCU updates.
3048 * @func: actual callback function to be invoked after the grace period
3049 *
3050 * The callback function will be invoked some time after a full grace
3051 * period elapses, in other words after all pre-existing RCU read-side
3052 * critical sections have completed. However, the callback function
3053 * might well execute concurrently with RCU read-side critical sections
3054 * that started after call_rcu() was invoked.
3055 *
3056 * RCU read-side critical sections are delimited by rcu_read_lock()
3057 * and rcu_read_unlock(), and may be nested. In addition, but only in
3058 * v5.0 and later, regions of code across which interrupts, preemption,
3059 * or softirqs have been disabled also serve as RCU read-side critical
3060 * sections. This includes hardware interrupt handlers, softirq handlers,
3061 * and NMI handlers.
3062 *
3063 * Note that all CPUs must agree that the grace period extended beyond
3064  * all pre-existing RCU read-side critical sections.  On systems with more
3065 * than one CPU, this means that when "func()" is invoked, each CPU is
3066 * guaranteed to have executed a full memory barrier since the end of its
3067 * last RCU read-side critical section whose beginning preceded the call
3068 * to call_rcu(). It also means that each CPU executing an RCU read-side
3069 * critical section that continues beyond the start of "func()" must have
3070 * executed a memory barrier after the call_rcu() but before the beginning
3071 * of that RCU read-side critical section. Note that these guarantees
3072 * include CPUs that are offline, idle, or executing in user mode, as
3073 * well as CPUs that are executing in the kernel.
3074 *
3075 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3076 * resulting RCU callback function "func()", then both CPU A and CPU B are
3077 * guaranteed to execute a full memory barrier during the time interval
3078 * between the call to call_rcu() and the invocation of "func()" -- even
3079 * if CPU A and CPU B are the same CPU (but again only if the system has
3080 * more than one CPU).
3081 *
3082 * Implementation of these memory-ordering guarantees is described here:
3083 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3084 */
3085 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3086 {
3087 __call_rcu(head, func);
3088 }
3089 EXPORT_SYMBOL_GPL(call_rcu);
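
/*
 * Illustrative usage sketch (not part of the original source): the common
 * call_rcu() pattern embeds the rcu_head in the protected structure and
 * frees the structure from the callback.  The "struct foo", foo_reclaim()
 * and old_fp names below are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	// Updater, after unpublishing old_fp under the update-side lock:
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 *
 * Readers that obtained old_fp via rcu_dereference() inside an RCU
 * read-side critical section remain safe until foo_reclaim() runs.
 */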
3090
3091
3092 /* Maximum number of jiffies to wait before draining a batch. */
3093 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3094 #define KFREE_N_BATCHES 2
3095 #define FREE_N_CHANNELS 2
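
/*
 * Worked example (illustrative): KFREE_DRAIN_JIFFIES is HZ / 50, and HZ
 * jiffies correspond to one second, so a batch is drained roughly every
 * 1/50 s = 20 ms regardless of the configured HZ value.
 */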
3096
3097 /**
3098 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3099 * @nr_records: Number of active pointers in the array
3100 * @next: Next bulk object in the block chain
3101 * @records: Array of the kvfree_rcu() pointers
3102 */
3103 struct kvfree_rcu_bulk_data {
3104 unsigned long nr_records;
3105 struct kvfree_rcu_bulk_data *next;
3106 void *records[];
3107 };
3108
3109 /*
3110 * This macro defines how many entries the "records" array
3111 * will contain. It is based on the fact that the size of
3112 * kvfree_rcu_bulk_data structure becomes exactly one page.
3113 */
3114 #define KVFREE_BULK_MAX_ENTR \
3115 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
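
/*
 * Worked example (illustrative, assuming 4 KiB pages, 64-bit pointers and
 * no structure padding): the header of struct kvfree_rcu_bulk_data is
 * 16 bytes (one unsigned long plus one pointer), so
 *
 *	KVFREE_BULK_MAX_ENTR = (4096 - 16) / 8 = 510
 *
 * pointers fit in each page-sized block.
 */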
3116
3117 /**
3118 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3119 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3120 * @head_free: List of kfree_rcu() objects waiting for a grace period
3121 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3122 * @krcp: Pointer to @kfree_rcu_cpu structure
3123 */
3124
3125 struct kfree_rcu_cpu_work {
3126 struct rcu_work rcu_work;
3127 struct rcu_head *head_free;
3128 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3129 struct kfree_rcu_cpu *krcp;
3130 };
3131
3132 /**
3133 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3134 * @head: List of kfree_rcu() objects not yet waiting for a grace period
3135 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3136 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3137 * @lock: Synchronize access to this structure
3138 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3139 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3140 * @initialized: The @rcu_work fields have been initialized
3141 * @count: Number of objects for which GP not started
3142 * @bkvcache:
3143 * A simple cache list that contains objects for reuse purpose.
3144 * In order to save some per-cpu space the list is singular.
3145 * Even though it is lockless an access has to be protected by the
3146 * per-cpu lock.
3147 * @page_cache_work: A work to refill the cache when it is empty
3148 * @backoff_page_cache_fill: Delay cache refills
3149 * @work_in_progress: Indicates that page_cache_work is running
3150 * @hrtimer: A hrtimer for scheduling a page_cache_work
3151 * @nr_bkv_objs: number of allocated objects at @bkvcache.
3152 *
3153 * This is a per-CPU structure. The reason that it is not included in
3154 * the rcu_data structure is to permit this code to be extracted from
3155 * the RCU files. Such extraction could allow further optimization of
3156 * the interactions with the slab allocators.
3157 */
3158 struct kfree_rcu_cpu {
3159 struct rcu_head *head;
3160 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3161 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3162 raw_spinlock_t lock;
3163 struct delayed_work monitor_work;
3164 bool monitor_todo;
3165 bool initialized;
3166 int count;
3167
3168 struct delayed_work page_cache_work;
3169 atomic_t backoff_page_cache_fill;
3170 atomic_t work_in_progress;
3171 struct hrtimer hrtimer;
3172
3173 struct llist_head bkvcache;
3174 int nr_bkv_objs;
3175 };
3176
3177 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3178 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3179 };
3180
3181 static __always_inline void
3182 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3183 {
3184 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3185 int i;
3186
3187 for (i = 0; i < bhead->nr_records; i++)
3188 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3189 #endif
3190 }
3191
3192 static inline struct kfree_rcu_cpu *
3193 krc_this_cpu_lock(unsigned long *flags)
3194 {
3195 struct kfree_rcu_cpu *krcp;
3196
3197 local_irq_save(*flags); // For safely calling this_cpu_ptr().
3198 krcp = this_cpu_ptr(&krc);
3199 raw_spin_lock(&krcp->lock);
3200
3201 return krcp;
3202 }
3203
3204 static inline void
3205 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3206 {
3207 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3208 }
3209
3210 static inline struct kvfree_rcu_bulk_data *
3211 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3212 {
3213 if (!krcp->nr_bkv_objs)
3214 return NULL;
3215
3216 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
3217 return (struct kvfree_rcu_bulk_data *)
3218 llist_del_first(&krcp->bkvcache);
3219 }
3220
3221 static inline bool
3222 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3223 struct kvfree_rcu_bulk_data *bnode)
3224 {
3225 // Check the limit.
3226 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3227 return false;
3228
3229 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3230 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3231 return true;
3232 }
3233
3234 static int
3235 drain_page_cache(struct kfree_rcu_cpu *krcp)
3236 {
3237 unsigned long flags;
3238 struct llist_node *page_list, *pos, *n;
3239 int freed = 0;
3240
3241 raw_spin_lock_irqsave(&krcp->lock, flags);
3242 page_list = llist_del_all(&krcp->bkvcache);
3243 WRITE_ONCE(krcp->nr_bkv_objs, 0);
3244 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3245
3246 llist_for_each_safe(pos, n, page_list) {
3247 free_page((unsigned long)pos);
3248 freed++;
3249 }
3250
3251 return freed;
3252 }
3253
3254 /*
3255 * This function is invoked in workqueue context after a grace period.
3256 * It frees all the objects queued on ->bkvhead_free or ->head_free.
3257 */
3258 static void kfree_rcu_work(struct work_struct *work)
3259 {
3260 unsigned long flags;
3261 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3262 struct rcu_head *head, *next;
3263 struct kfree_rcu_cpu *krcp;
3264 struct kfree_rcu_cpu_work *krwp;
3265 int i, j;
3266
3267 krwp = container_of(to_rcu_work(work),
3268 struct kfree_rcu_cpu_work, rcu_work);
3269 krcp = krwp->krcp;
3270
3271 raw_spin_lock_irqsave(&krcp->lock, flags);
3272 // Channels 1 and 2.
3273 for (i = 0; i < FREE_N_CHANNELS; i++) {
3274 bkvhead[i] = krwp->bkvhead_free[i];
3275 krwp->bkvhead_free[i] = NULL;
3276 }
3277
3278 // Channel 3.
3279 head = krwp->head_free;
3280 krwp->head_free = NULL;
3281 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3282
3283 // Handle the first two channels.
3284 for (i = 0; i < FREE_N_CHANNELS; i++) {
3285 for (; bkvhead[i]; bkvhead[i] = bnext) {
3286 bnext = bkvhead[i]->next;
3287 debug_rcu_bhead_unqueue(bkvhead[i]);
3288
3289 rcu_lock_acquire(&rcu_callback_map);
3290 if (i == 0) { // kmalloc() / kfree().
3291 trace_rcu_invoke_kfree_bulk_callback(
3292 rcu_state.name, bkvhead[i]->nr_records,
3293 bkvhead[i]->records);
3294
3295 kfree_bulk(bkvhead[i]->nr_records,
3296 bkvhead[i]->records);
3297 } else { // vmalloc() / vfree().
3298 for (j = 0; j < bkvhead[i]->nr_records; j++) {
3299 trace_rcu_invoke_kvfree_callback(
3300 rcu_state.name,
3301 bkvhead[i]->records[j], 0);
3302
3303 vfree(bkvhead[i]->records[j]);
3304 }
3305 }
3306 rcu_lock_release(&rcu_callback_map);
3307
3308 raw_spin_lock_irqsave(&krcp->lock, flags);
3309 if (put_cached_bnode(krcp, bkvhead[i]))
3310 bkvhead[i] = NULL;
3311 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3312
3313 if (bkvhead[i])
3314 free_page((unsigned long) bkvhead[i]);
3315
3316 cond_resched_tasks_rcu_qs();
3317 }
3318 }
3319
3320 /*
3321 * This is used when the "bulk" path can not be used for the
3322 * double-argument of kvfree_rcu(). This happens when the
3323 * page-cache is empty, which means that objects are instead
3324 * queued on a linked list through their rcu_head structures.
3325 * This list is named "Channel 3".
3326 */
3327 for (; head; head = next) {
3328 unsigned long offset = (unsigned long)head->func;
3329 void *ptr = (void *)head - offset;
3330
3331 next = head->next;
3332 debug_rcu_head_unqueue((struct rcu_head *)ptr);
3333 rcu_lock_acquire(&rcu_callback_map);
3334 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3335
3336 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3337 kvfree(ptr);
3338
3339 rcu_lock_release(&rcu_callback_map);
3340 cond_resched_tasks_rcu_qs();
3341 }
3342 }
3343
3344 static bool
3345 need_offload_krc(struct kfree_rcu_cpu *krcp)
3346 {
3347 int i;
3348
3349 for (i = 0; i < FREE_N_CHANNELS; i++)
3350 if (krcp->bkvhead[i])
3351 return true;
3352
3353 return !!krcp->head;
3354 }
3355
3356 static bool
3357 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3358 {
3359 int i;
3360
3361 for (i = 0; i < FREE_N_CHANNELS; i++)
3362 if (krwp->bkvhead_free[i])
3363 return true;
3364
3365 return !!krwp->head_free;
3366 }
3367
3368 /*
3369 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3370 */
3371 static void kfree_rcu_monitor(struct work_struct *work)
3372 {
3373 struct kfree_rcu_cpu *krcp = container_of(work,
3374 struct kfree_rcu_cpu, monitor_work.work);
3375 unsigned long flags;
3376 int i, j;
3377
3378 raw_spin_lock_irqsave(&krcp->lock, flags);
3379
3380 // Attempt to start a new batch.
3381 for (i = 0; i < KFREE_N_BATCHES; i++) {
3382 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3383
3384 // Try to detach bulk_head or head and attach it, but only when
3385 // all channels are free.  If any channel is not free, krwp still
3386 // has in-flight RCU work handling its previous free business.
3387 if (need_wait_for_krwp_work(krwp))
3388 continue;
3389
3390 if (need_offload_krc(krcp)) {
3391 // Channel 1 corresponds to the SLAB-pointer bulk path.
3392 // Channel 2 corresponds to vmalloc-pointer bulk path.
3393 for (j = 0; j < FREE_N_CHANNELS; j++) {
3394 if (!krwp->bkvhead_free[j]) {
3395 krwp->bkvhead_free[j] = krcp->bkvhead[j];
3396 krcp->bkvhead[j] = NULL;
3397 }
3398 }
3399
3400 // Channel 3 corresponds to both SLAB and vmalloc
3401 // objects queued on the linked list.
3402 if (!krwp->head_free) {
3403 krwp->head_free = krcp->head;
3404 krcp->head = NULL;
3405 }
3406
3407 WRITE_ONCE(krcp->count, 0);
3408
3409 // One work item corresponds to one batch, so each batch
3410 // handles all three "free channels".  Note that the work
3411 // may already be in the pending state if the channels
3412 // were detached one after the other on successive
3413 // invocations.
3414 queue_rcu_work(system_wq, &krwp->rcu_work);
3415 }
3416 }
3417
3418 // If there is nothing left to detach, our job here is
3419 // successfully done.  If at least one of the channels is
3420 // still busy, rearm the work to repeat the attempt,
3421 // because previous batches are still in progress and
3422 // must drain first.
3423 if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
3424 krcp->monitor_todo = false;
3425 else
3426 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3427
3428 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3429 }
3430
3431 static enum hrtimer_restart
3432 schedule_page_work_fn(struct hrtimer *t)
3433 {
3434 struct kfree_rcu_cpu *krcp =
3435 container_of(t, struct kfree_rcu_cpu, hrtimer);
3436
3437 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3438 return HRTIMER_NORESTART;
3439 }
3440
3441 static void fill_page_cache_func(struct work_struct *work)
3442 {
3443 struct kvfree_rcu_bulk_data *bnode;
3444 struct kfree_rcu_cpu *krcp =
3445 container_of(work, struct kfree_rcu_cpu,
3446 page_cache_work.work);
3447 unsigned long flags;
3448 int nr_pages;
3449 bool pushed;
3450 int i;
3451
3452 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3453 1 : rcu_min_cached_objs;
3454
3455 for (i = 0; i < nr_pages; i++) {
3456 bnode = (struct kvfree_rcu_bulk_data *)
3457 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3458
3459 if (!bnode)
3460 break;
3461
3462 raw_spin_lock_irqsave(&krcp->lock, flags);
3463 pushed = put_cached_bnode(krcp, bnode);
3464 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3465
3466 if (!pushed) {
3467 free_page((unsigned long) bnode);
3468 break;
3469 }
3470 }
3471
3472 atomic_set(&krcp->work_in_progress, 0);
3473 atomic_set(&krcp->backoff_page_cache_fill, 0);
3474 }
3475
3476 static void
3477 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3478 {
3479 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3480 !atomic_xchg(&krcp->work_in_progress, 1)) {
3481 if (atomic_read(&krcp->backoff_page_cache_fill)) {
3482 queue_delayed_work(system_wq,
3483 &krcp->page_cache_work,
3484 msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3485 } else {
3486 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3487 krcp->hrtimer.function = schedule_page_work_fn;
3488 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3489 }
3490 }
3491 }
3492
3493 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3494 // state specified by flags. If can_alloc is true, the caller must
3495 // be schedulable and not be holding any locks or mutexes that might be
3496 // acquired by the memory allocator or anything that it might invoke.
3497 // Returns true if ptr was successfully recorded, else the caller must
3498 // use a fallback.
3499 static inline bool
3500 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3501 unsigned long *flags, void *ptr, bool can_alloc)
3502 {
3503 struct kvfree_rcu_bulk_data *bnode;
3504 int idx;
3505
3506 *krcp = krc_this_cpu_lock(flags);
3507 if (unlikely(!(*krcp)->initialized))
3508 return false;
3509
3510 idx = !!is_vmalloc_addr(ptr);
3511
3512 /* Check if a new block is required. */
3513 if (!(*krcp)->bkvhead[idx] ||
3514 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3515 bnode = get_cached_bnode(*krcp);
3516 if (!bnode && can_alloc) {
3517 krc_this_cpu_unlock(*krcp, *flags);
3518
3519 // __GFP_NORETRY - permits only light-weight direct reclaim,
3520 // which keeps the fallback path from being hit too often.
3521 // It also forbids invoking the OOM killer, which is also
3522 // beneficial since we are about to release memory soon.
3523 //
3524 // __GFP_NOMEMALLOC - prevents consuming all of the memory
3525 // reserves.  Please note we have a fallback path.
3526 //
3527 // __GFP_NOWARN - allocations are expected to fail under
3528 // low-memory or high memory-pressure scenarios, so do not
3529 // warn about such failures.
3530 bnode = (struct kvfree_rcu_bulk_data *)
3531 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3532 *krcp = krc_this_cpu_lock(flags);
3533 }
3534
3535 if (!bnode)
3536 return false;
3537
3538 /* Initialize the new block. */
3539 bnode->nr_records = 0;
3540 bnode->next = (*krcp)->bkvhead[idx];
3541
3542 /* Attach it to the head. */
3543 (*krcp)->bkvhead[idx] = bnode;
3544 }
3545
3546 /* Finally insert. */
3547 (*krcp)->bkvhead[idx]->records
3548 [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3549
3550 return true;
3551 }
3552
3553 /*
3554 * Queue a request for lazy invocation of the appropriate free routine
3555 * after a grace period. Please note that three paths are maintained,
3556 * two for the common case using arrays of pointers and a third one that
3557 * is used only when the main paths cannot be used, for example, due to
3558 * memory pressure.
3559 *
3560  * Each kvfree_call_rcu() request is added to a batch. The batch is drained
3561  * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3562  * in workqueue context. This allows requests to be batched together, which
3563  * reduces the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3564 */
3565 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3566 {
3567 unsigned long flags;
3568 struct kfree_rcu_cpu *krcp;
3569 bool success;
3570 void *ptr;
3571
3572 if (head) {
3573 ptr = (void *) head - (unsigned long) func;
3574 } else {
3575 /*
3576  * Please note there is a limitation for the head-less
3577  * variant, hence the clear rule for such objects: it
3578  * may be used only from contexts in which might_sleep()
3579  * is legal.  For all other call sites, please embed an
3580  * rcu_head in your data.
3581  */
3582 might_sleep();
3583 ptr = (unsigned long *) func;
3584 }
3585
3586 // Queue the object but don't yet schedule the batch.
3587 if (debug_rcu_head_queue(ptr)) {
3588 // Probable double kfree_rcu(), just leak.
3589 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3590 __func__, head);
3591
3592 // Mark as success and leave.
3593 return;
3594 }
3595
3596 kasan_record_aux_stack_noalloc(ptr);
3597 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3598 if (!success) {
3599 run_page_cache_worker(krcp);
3600
3601 if (head == NULL)
3602 // Inline if kvfree_rcu(one_arg) call.
3603 goto unlock_return;
3604
3605 head->func = func;
3606 head->next = krcp->head;
3607 krcp->head = head;
3608 success = true;
3609 }
3610
3611 WRITE_ONCE(krcp->count, krcp->count + 1);
3612
3613 /*
3614 * The kvfree_rcu() caller considers the pointer freed at this point
3615 * and likely removes any references to it. Since the actual slab
3616 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3617 * this object (no scanning or false positives reporting).
3618 */
3619 kmemleak_ignore(ptr);
3620
3621 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3622 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3623 !krcp->monitor_todo) {
3624 krcp->monitor_todo = true;
3625 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3626 }
3627
3628 unlock_return:
3629 krc_this_cpu_unlock(krcp, flags);
3630
3631 /*
3632  * Fall back to inline kvfree() after synchronize_rcu().  This is
3633  * legal only from a might_sleep() context, in which the current
3634  * CPU can pass through a quiescent state.
3635  */
3636 if (!success) {
3637 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3638 synchronize_rcu();
3639 kvfree(ptr);
3640 }
3641 }
3642 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
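
/*
 * Illustrative usage sketch (not part of the original source): the
 * kvfree_rcu() wrappers in include/linux/rcupdate.h expand to
 * kvfree_call_rcu().  The "struct bar", old_bar and old_buf names below
 * are hypothetical.
 *
 *	struct bar {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// Double-argument form: usable from any context; the named
 *	// rcu_head field supplies the offset used for reclamation.
 *	kvfree_rcu(old_bar, rcu);
 *
 *	// Single-argument (head-less) form: only the pointer is queued,
 *	// so a fallback may need to block, which is why this form is
 *	// legal only where might_sleep() is allowed.
 *	kvfree_rcu(old_buf);
 */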
3643
3644 static unsigned long
3645 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3646 {
3647 int cpu;
3648 unsigned long count = 0;
3649
3650 /* Snapshot count of all CPUs */
3651 for_each_possible_cpu(cpu) {
3652 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3653
3654 count += READ_ONCE(krcp->count);
3655 count += READ_ONCE(krcp->nr_bkv_objs);
3656 atomic_set(&krcp->backoff_page_cache_fill, 1);
3657 }
3658
3659 return count;
3660 }
3661
3662 static unsigned long
3663 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3664 {
3665 int cpu, freed = 0;
3666
3667 for_each_possible_cpu(cpu) {
3668 int count;
3669 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3670
3671 count = krcp->count;
3672 count += drain_page_cache(krcp);
3673 kfree_rcu_monitor(&krcp->monitor_work.work);
3674
3675 sc->nr_to_scan -= count;
3676 freed += count;
3677
3678 if (sc->nr_to_scan <= 0)
3679 break;
3680 }
3681
3682 return freed == 0 ? SHRINK_STOP : freed;
3683 }
3684
3685 static struct shrinker kfree_rcu_shrinker = {
3686 .count_objects = kfree_rcu_shrink_count,
3687 .scan_objects = kfree_rcu_shrink_scan,
3688 .batch = 0,
3689 .seeks = DEFAULT_SEEKS,
3690 };
3691
3692 void __init kfree_rcu_scheduler_running(void)
3693 {
3694 int cpu;
3695 unsigned long flags;
3696
3697 for_each_possible_cpu(cpu) {
3698 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3699
3700 raw_spin_lock_irqsave(&krcp->lock, flags);
3701 if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
3702 krcp->monitor_todo) {
3703 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3704 continue;
3705 }
3706 krcp->monitor_todo = true;
3707 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3708 KFREE_DRAIN_JIFFIES);
3709 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3710 }
3711 }
3712
3713 /*
3714 * During early boot, any blocking grace-period wait automatically
3715 * implies a grace period. Later on, this is never the case for PREEMPTION.
3716 *
3717 * However, because a context switch is a grace period for !PREEMPTION, any
3718 * blocking grace-period wait automatically implies a grace period if
3719  * there is only one CPU online at any point in time during execution of
3720 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3721 * occasionally incorrectly indicate that there are multiple CPUs online
3722 * when there was in fact only one the whole time, as this just adds some
3723 * overhead: RCU still operates correctly.
3724 */
3725 static int rcu_blocking_is_gp(void)
3726 {
3727 int ret;
3728
3729 if (IS_ENABLED(CONFIG_PREEMPTION))
3730 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3731 might_sleep(); /* Check for RCU read-side critical section. */
3732 preempt_disable();
3733 /*
3734 * If the rcu_state.n_online_cpus counter is equal to one,
3735 * there is only one CPU, and that CPU sees all prior accesses
3736 * made by any CPU that was online at the time of its access.
3737 * Furthermore, if this counter is equal to one, its value cannot
3738 * change until after the preempt_enable() below.
3739 *
3740 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3741 * all later CPUs (both this one and any that come online later
3742 * on) are guaranteed to see all accesses prior to this point
3743 * in the code, without the need for additional memory barriers.
3744 * Those memory barriers are provided by CPU-hotplug code.
3745 */
3746 ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3747 preempt_enable();
3748 return ret;
3749 }
3750
3751 /**
3752 * synchronize_rcu - wait until a grace period has elapsed.
3753 *
3754 * Control will return to the caller some time after a full grace
3755 * period has elapsed, in other words after all currently executing RCU
3756 * read-side critical sections have completed. Note, however, that
3757 * upon return from synchronize_rcu(), the caller might well be executing
3758 * concurrently with new RCU read-side critical sections that began while
3759 * synchronize_rcu() was waiting.
3760 *
3761 * RCU read-side critical sections are delimited by rcu_read_lock()
3762 * and rcu_read_unlock(), and may be nested. In addition, but only in
3763 * v5.0 and later, regions of code across which interrupts, preemption,
3764 * or softirqs have been disabled also serve as RCU read-side critical
3765 * sections. This includes hardware interrupt handlers, softirq handlers,
3766 * and NMI handlers.
3767 *
3768 * Note that this guarantee implies further memory-ordering guarantees.
3769 * On systems with more than one CPU, when synchronize_rcu() returns,
3770 * each CPU is guaranteed to have executed a full memory barrier since
3771 * the end of its last RCU read-side critical section whose beginning
3772 * preceded the call to synchronize_rcu(). In addition, each CPU having
3773 * an RCU read-side critical section that extends beyond the return from
3774 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3775 * after the beginning of synchronize_rcu() and before the beginning of
3776 * that RCU read-side critical section. Note that these guarantees include
3777 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3778 * that are executing in the kernel.
3779 *
3780 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3781 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3782 * to have executed a full memory barrier during the execution of
3783 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3784 * again only if the system has more than one CPU).
3785 *
3786 * Implementation of these memory-ordering guarantees is described here:
3787 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3788 */
3789 void synchronize_rcu(void)
3790 {
3791 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3792 lock_is_held(&rcu_lock_map) ||
3793 lock_is_held(&rcu_sched_lock_map),
3794 "Illegal synchronize_rcu() in RCU read-side critical section");
3795 if (rcu_blocking_is_gp())
3796 return; // Context allows vacuous grace periods.
3797 if (rcu_gp_is_expedited())
3798 synchronize_rcu_expedited();
3799 else
3800 wait_rcu_gp(call_rcu);
3801 }
3802 EXPORT_SYMBOL_GPL(synchronize_rcu);
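
/*
 * Illustrative usage sketch (not part of the original source): the classic
 * unpublish-then-wait update pattern.  The gptr pointer, its gptr_lock,
 * new_fp, and struct foo are hypothetical.
 *
 *	struct foo *old;
 *
 *	spin_lock(&gptr_lock);
 *	old = rcu_dereference_protected(gptr, lockdep_is_held(&gptr_lock));
 *	rcu_assign_pointer(gptr, new_fp);
 *	spin_unlock(&gptr_lock);
 *
 *	synchronize_rcu();	// Wait for pre-existing readers to finish.
 *	kfree(old);		// No reader can still reference "old".
 */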
3803
3804 /**
3805 * get_state_synchronize_rcu - Snapshot current RCU state
3806 *
3807 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3808 * or poll_state_synchronize_rcu() to determine whether or not a full
3809 * grace period has elapsed in the meantime.
3810 */
3811 unsigned long get_state_synchronize_rcu(void)
3812 {
3813 /*
3814 * Any prior manipulation of RCU-protected data must happen
3815 * before the load from ->gp_seq.
3816 */
3817 smp_mb(); /* ^^^ */
3818 return rcu_seq_snap(&rcu_state.gp_seq);
3819 }
3820 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3821
3822 /**
3823 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3824 *
3825 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3826 * or poll_state_synchronize_rcu() to determine whether or not a full
3827 * grace period has elapsed in the meantime. If the needed grace period
3828 * is not already slated to start, notifies RCU core of the need for that
3829 * grace period.
3830 *
3831 * Interrupts must be enabled for the case where it is necessary to awaken
3832 * the grace-period kthread.
3833 */
3834 unsigned long start_poll_synchronize_rcu(void)
3835 {
3836 unsigned long flags;
3837 unsigned long gp_seq = get_state_synchronize_rcu();
3838 bool needwake;
3839 struct rcu_data *rdp;
3840 struct rcu_node *rnp;
3841
3842 lockdep_assert_irqs_enabled();
3843 local_irq_save(flags);
3844 rdp = this_cpu_ptr(&rcu_data);
3845 rnp = rdp->mynode;
3846 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3847 needwake = rcu_start_this_gp(rnp, rdp, gp_seq);
3848 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3849 if (needwake)
3850 rcu_gp_kthread_wake();
3851 return gp_seq;
3852 }
3853 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3854
3855 /**
3856 * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
3857 *
3858 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3859 *
3860 * If a full RCU grace period has elapsed since the earlier call from
3861 * which oldstate was obtained, return @true, otherwise return @false.
3862 * If @false is returned, it is the caller's responsibility to invoke this
3863 * function later on until it does return @true. Alternatively, the caller
3864 * can explicitly wait for a grace period, for example, by passing @oldstate
3865 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3866 *
3867 * Yes, this function does not take counter wrap into account.
3868 * But counter wrap is harmless. If the counter wraps, we have waited for
3869 * more than 2 billion grace periods (and way more on a 64-bit system!).
3870 * Those needing to keep oldstate values for very long time periods
3871 * (many hours even on 32-bit systems) should check them occasionally
3872 * and either refresh them or set a flag indicating that the grace period
3873 * has completed.
3874 *
3875 * This function provides the same memory-ordering guarantees that
3876 * would be provided by a synchronize_rcu() that was invoked at the call
3877 * to the function that provided @oldstate, and that returned at the end
3878 * of this function.
3879 */
3880 bool poll_state_synchronize_rcu(unsigned long oldstate)
3881 {
3882 if (rcu_seq_done(&rcu_state.gp_seq, oldstate)) {
3883 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3884 return true;
3885 }
3886 return false;
3887 }
3888 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3889
3890 /**
3891 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3892 *
3893 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3894 *
3895 * If a full RCU grace period has elapsed since the earlier call to
3896 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3897 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3898 *
3899 * Yes, this function does not take counter wrap into account. But
3900 * counter wrap is harmless. If the counter wraps, we have waited for
3901 * more than 2 billion grace periods (and way more on a 64-bit system!),
3902 * so waiting for one additional grace period should be just fine.
3903 *
3904 * This function provides the same memory-ordering guarantees that
3905 * would be provided by a synchronize_rcu() that was invoked at the call
3906 * to the function that provided @oldstate, and that returned at the end
3907 * of this function.
3908 */
3909 void cond_synchronize_rcu(unsigned long oldstate)
3910 {
3911 if (!poll_state_synchronize_rcu(oldstate))
3912 synchronize_rcu();
3913 }
3914 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
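
/*
 * Illustrative usage sketch (not part of the original source) combining
 * the polling grace-period interfaces defined above:
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *
 *	// ... do other work while the grace period progresses ...
 *
 *	if (!poll_state_synchronize_rcu(cookie))
 *		cond_synchronize_rcu(cookie);	// Blocks only if still needed.
 *
 * A caller that must never block can instead keep retrying
 * poll_state_synchronize_rcu(cookie) and reclaim the protected object
 * only once it returns true.
 */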
3915
3916 /*
3917 * Check to see if there is any immediate RCU-related work to be done by
3918 * the current CPU, returning 1 if so and zero otherwise. The checks are
3919 * in order of increasing expense: checks that can be carried out against
3920 * CPU-local state are performed first. However, we must check for CPU
3921 * stalls first, else we might not get a chance.
3922 */
3923 static int rcu_pending(int user)
3924 {
3925 bool gp_in_progress;
3926 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3927 struct rcu_node *rnp = rdp->mynode;
3928
3929 lockdep_assert_irqs_disabled();
3930
3931 /* Check for CPU stalls, if enabled. */
3932 check_cpu_stall(rdp);
3933
3934 /* Does this CPU need a deferred NOCB wakeup? */
3935 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3936 return 1;
3937
3938 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3939 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3940 return 0;
3941
3942 /* Is the RCU core waiting for a quiescent state from this CPU? */
3943 gp_in_progress = rcu_gp_in_progress();
3944 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3945 return 1;
3946
3947 /* Does this CPU have callbacks ready to invoke? */
3948 if (!rcu_rdp_is_offloaded(rdp) &&
3949 rcu_segcblist_ready_cbs(&rdp->cblist))
3950 return 1;
3951
3952 /* Has RCU gone idle with this CPU needing another grace period? */
3953 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3954 !rcu_rdp_is_offloaded(rdp) &&
3955 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3956 return 1;
3957
3958 /* Have RCU grace period completed or started? */
3959 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3960 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3961 return 1;
3962
3963 /* nothing to do */
3964 return 0;
3965 }
3966
3967 /*
3968 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3969 * the compiler is expected to optimize this away.
3970 */
3971 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3972 {
3973 trace_rcu_barrier(rcu_state.name, s, cpu,
3974 atomic_read(&rcu_state.barrier_cpu_count), done);
3975 }
3976
3977 /*
3978 * RCU callback function for rcu_barrier(). If we are last, wake
3979 * up the task executing rcu_barrier().
3980 *
3981 * Note that the value of rcu_state.barrier_sequence must be captured
3982 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3983 * other CPUs might count the value down to zero before this CPU gets
3984 * around to invoking rcu_barrier_trace(), which might result in bogus
3985 * data from the next instance of rcu_barrier().
3986 */
3987 static void rcu_barrier_callback(struct rcu_head *rhp)
3988 {
3989 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3990
3991 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3992 rcu_barrier_trace(TPS("LastCB"), -1, s);
3993 complete(&rcu_state.barrier_completion);
3994 } else {
3995 rcu_barrier_trace(TPS("CB"), -1, s);
3996 }
3997 }
3998
3999 /*
4000 * Called with preemption disabled, and from cross-cpu IRQ context.
4001 */
4002 static void rcu_barrier_func(void *cpu_in)
4003 {
4004 uintptr_t cpu = (uintptr_t)cpu_in;
4005 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4006
4007 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
4008 rdp->barrier_head.func = rcu_barrier_callback;
4009 debug_rcu_head_queue(&rdp->barrier_head);
4010 rcu_nocb_lock(rdp);
4011 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
4012 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
4013 atomic_inc(&rcu_state.barrier_cpu_count);
4014 } else {
4015 debug_rcu_head_unqueue(&rdp->barrier_head);
4016 rcu_barrier_trace(TPS("IRQNQ"), -1,
4017 rcu_state.barrier_sequence);
4018 }
4019 rcu_nocb_unlock(rdp);
4020 }
4021
4022 /**
4023 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4024 *
4025 * Note that this primitive does not necessarily wait for an RCU grace period
4026 * to complete. For example, if there are no RCU callbacks queued anywhere
4027 * in the system, then rcu_barrier() is within its rights to return
4028 * immediately, without waiting for anything, much less an RCU grace period.
4029 */
4030 void rcu_barrier(void)
4031 {
4032 uintptr_t cpu;
4033 struct rcu_data *rdp;
4034 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4035
4036 rcu_barrier_trace(TPS("Begin"), -1, s);
4037
4038 /* Take mutex to serialize concurrent rcu_barrier() requests. */
4039 mutex_lock(&rcu_state.barrier_mutex);
4040
4041 /* Did someone else do our work for us? */
4042 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4043 rcu_barrier_trace(TPS("EarlyExit"), -1,
4044 rcu_state.barrier_sequence);
4045 smp_mb(); /* caller's subsequent code after above check. */
4046 mutex_unlock(&rcu_state.barrier_mutex);
4047 return;
4048 }
4049
4050 /* Mark the start of the barrier operation. */
4051 rcu_seq_start(&rcu_state.barrier_sequence);
4052 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4053
4054 /*
4055 * Initialize the count to two rather than to zero in order
4056 * to avoid a too-soon return to zero in case of an immediate
4057 * invocation of the just-enqueued callback (or preemption of
4058 * this task). Exclude CPU-hotplug operations to ensure that no
4059 * offline non-offloaded CPU has callbacks queued.
4060 */
4061 init_completion(&rcu_state.barrier_completion);
4062 atomic_set(&rcu_state.barrier_cpu_count, 2);
4063 cpus_read_lock();
4064
4065 /*
4066 * Force each CPU with callbacks to register a new callback.
4067 * When that callback is invoked, we will know that all of the
4068 * corresponding CPU's preceding callbacks have been invoked.
4069 */
4070 for_each_possible_cpu(cpu) {
4071 rdp = per_cpu_ptr(&rcu_data, cpu);
4072 if (cpu_is_offline(cpu) &&
4073 !rcu_rdp_is_offloaded(rdp))
4074 continue;
4075 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
4076 rcu_barrier_trace(TPS("OnlineQ"), cpu,
4077 rcu_state.barrier_sequence);
4078 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
4079 } else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
4080 cpu_is_offline(cpu)) {
4081 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
4082 rcu_state.barrier_sequence);
4083 local_irq_disable();
4084 rcu_barrier_func((void *)cpu);
4085 local_irq_enable();
4086 } else if (cpu_is_offline(cpu)) {
4087 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
4088 rcu_state.barrier_sequence);
4089 } else {
4090 rcu_barrier_trace(TPS("OnlineNQ"), cpu,
4091 rcu_state.barrier_sequence);
4092 }
4093 }
4094 cpus_read_unlock();
4095
4096 /*
4097 * Now that we have an rcu_barrier_callback() callback on each
4098 * CPU, and thus each counted, remove the initial count.
4099 */
4100 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4101 complete(&rcu_state.barrier_completion);
4102
4103 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4104 wait_for_completion(&rcu_state.barrier_completion);
4105
4106 /* Mark the end of the barrier operation. */
4107 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4108 rcu_seq_end(&rcu_state.barrier_sequence);
4109
4110 /* Other rcu_barrier() invocations can now safely proceed. */
4111 mutex_unlock(&rcu_state.barrier_mutex);
4112 }
4113 EXPORT_SYMBOL_GPL(rcu_barrier);
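
/*
 * Illustrative usage sketch (not part of the original source): a module
 * that posts callbacks with call_rcu() must wait for them all before its
 * callback functions can be unloaded.  foo_unregister_interfaces() and
 * foo_reclaim() are hypothetical.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_unregister_interfaces();	// Stop posting new callbacks.
 *		rcu_barrier();		// Wait for queued foo_reclaim() calls.
 *		// Only now may the text of foo_reclaim() be unmapped.
 *	}
 */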
4114
4115 /*
4116 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4117 * first CPU in a given leaf rcu_node structure coming online. The caller
4118 * must hold the corresponding leaf rcu_node ->lock with interrupts
4119 * disabled.
4120 */
4121 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4122 {
4123 long mask;
4124 long oldmask;
4125 struct rcu_node *rnp = rnp_leaf;
4126
4127 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4128 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4129 for (;;) {
4130 mask = rnp->grpmask;
4131 rnp = rnp->parent;
4132 if (rnp == NULL)
4133 return;
4134 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4135 oldmask = rnp->qsmaskinit;
4136 rnp->qsmaskinit |= mask;
4137 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4138 if (oldmask)
4139 return;
4140 }
4141 }
4142
4143 /*
4144 * Do boot-time initialization of a CPU's per-CPU RCU data.
4145 */
4146 static void __init
4147 rcu_boot_init_percpu_data(int cpu)
4148 {
4149 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4150
4151 /* Set up local state, ensuring consistent view of global state. */
4152 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4153 INIT_WORK(&rdp->strict_work, strict_work_handler);
4154 WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4155 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4156 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4157 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4158 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4159 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4160 rdp->cpu = cpu;
4161 rcu_boot_init_nocb_percpu_data(rdp);
4162 }
4163
4164 /*
4165 * Invoked early in the CPU-online process, when pretty much all services
4166 * are available. The incoming CPU is not present.
4167 *
4168 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4169 * offline event can be happening at a given time. Note also that we can
4170 * accept some slop in the rsp->gp_seq access due to the fact that this
4171 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4172 * And any offloaded callbacks are being numbered elsewhere.
4173 */
4174 int rcutree_prepare_cpu(unsigned int cpu)
4175 {
4176 unsigned long flags;
4177 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4178 struct rcu_node *rnp = rcu_get_root();
4179
4180 /* Set up local state, ensuring consistent view of global state. */
4181 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4182 rdp->qlen_last_fqs_check = 0;
4183 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4184 rdp->blimit = blimit;
4185 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
4186 rcu_dynticks_eqs_online();
4187 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4188
4189 /*
4190 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4191 * (re-)initialized.
4192 */
4193 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4194 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4195
4196 /*
4197 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4198 * propagation up the rcu_node tree will happen at the beginning
4199 * of the next grace period.
4200 */
4201 rnp = rdp->mynode;
4202 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4203 rdp->beenonline = true; /* We have now been online. */
4204 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4205 rdp->gp_seq_needed = rdp->gp_seq;
4206 rdp->cpu_no_qs.b.norm = true;
4207 rdp->core_needs_qs = false;
4208 rdp->rcu_iw_pending = false;
4209 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4210 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4211 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4212 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4213 rcu_spawn_one_boost_kthread(rnp);
4214 rcu_spawn_cpu_nocb_kthread(cpu);
4215 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4216
4217 return 0;
4218 }
4219
4220 /*
4221 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4222 */
4223 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4224 {
4225 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4226
4227 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4228 }
4229
4230 /*
4231 * Near the end of the CPU-online process. Pretty much all services
4232 * enabled, and the CPU is now very much alive.
4233 */
4234 int rcutree_online_cpu(unsigned int cpu)
4235 {
4236 unsigned long flags;
4237 struct rcu_data *rdp;
4238 struct rcu_node *rnp;
4239
4240 rdp = per_cpu_ptr(&rcu_data, cpu);
4241 rnp = rdp->mynode;
4242 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4243 rnp->ffmask |= rdp->grpmask;
4244 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4245 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4246 return 0; /* Too early in boot for scheduler work. */
4247 sync_sched_exp_online_cleanup(cpu);
4248 rcutree_affinity_setting(cpu, -1);
4249
4250 // Stop-machine done, so allow nohz_full to disable tick.
4251 tick_dep_clear(TICK_DEP_BIT_RCU);
4252 return 0;
4253 }
4254
4255 /*
4256 * Near the beginning of the process. The CPU is still very much alive
4257 * with pretty much all services enabled.
4258 */
4259 int rcutree_offline_cpu(unsigned int cpu)
4260 {
4261 unsigned long flags;
4262 struct rcu_data *rdp;
4263 struct rcu_node *rnp;
4264
4265 rdp = per_cpu_ptr(&rcu_data, cpu);
4266 rnp = rdp->mynode;
4267 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4268 rnp->ffmask &= ~rdp->grpmask;
4269 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4270
4271 rcutree_affinity_setting(cpu, cpu);
4272
4273 // nohz_full CPUs need the tick for stop-machine to work quickly
4274 tick_dep_set(TICK_DEP_BIT_RCU);
4275 return 0;
4276 }
4277
4278 /*
4279 * Mark the specified CPU as being online so that subsequent grace periods
4280 * (both expedited and normal) will wait on it. Note that this means that
4281 * incoming CPUs are not allowed to use RCU read-side critical sections
4282 * until this function is called. Failing to observe this restriction
4283 * will result in lockdep splats.
4284 *
4285 * Note that this function is special in that it is invoked directly
4286 * from the incoming CPU rather than from the cpuhp_step mechanism.
4287 * This is because this function must be invoked at a precise location.
4288 */
4289 void rcu_cpu_starting(unsigned int cpu)
4290 {
4291 unsigned long flags;
4292 unsigned long mask;
4293 struct rcu_data *rdp;
4294 struct rcu_node *rnp;
4295 bool newcpu;
4296
4297 rdp = per_cpu_ptr(&rcu_data, cpu);
4298 if (rdp->cpu_started)
4299 return;
4300 rdp->cpu_started = true;
4301
4302 rnp = rdp->mynode;
4303 mask = rdp->grpmask;
4304 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4305 WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4306 smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4307 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4308 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4309 newcpu = !(rnp->expmaskinitnext & mask);
4310 rnp->expmaskinitnext |= mask;
4311 /* Allow lockless access for expedited grace periods. */
4312 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4313 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4314 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4315 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4316 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4317
4318 /* An incoming CPU should never be blocking a grace period. */
4319 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4320 rcu_disable_urgency_upon_qs(rdp);
4321 /* Report QS -after- changing ->qsmaskinitnext! */
4322 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4323 } else {
4324 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4325 }
4326 smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4327 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4328 WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4329 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4330 }
4331
4332 /*
4333  * The outgoing CPU has no further need of RCU, so remove it from
4334 * the rcu_node tree's ->qsmaskinitnext bit masks.
4335 *
4336 * Note that this function is special in that it is invoked directly
4337 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4338 * This is because this function must be invoked at a precise location.
4339 */
4340 void rcu_report_dead(unsigned int cpu)
4341 {
4342 unsigned long flags;
4343 unsigned long mask;
4344 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4345 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4346
4347 // Do any dangling deferred wakeups.
4348 do_nocb_deferred_wakeup(rdp);
4349
4350 /* QS for any half-done expedited grace period. */
4351 preempt_disable();
4352 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4353 preempt_enable();
4354 rcu_preempt_deferred_qs(current);
4355
4356 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4357 mask = rdp->grpmask;
4358 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4359 WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4360 smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4361 raw_spin_lock(&rcu_state.ofl_lock);
4362 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4363 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4364 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4365 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4366 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4367 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4368 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4369 }
4370 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4371 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4372 raw_spin_unlock(&rcu_state.ofl_lock);
4373 smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4374 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4375 WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4376
4377 rdp->cpu_started = false;
4378 }
4379
4380 #ifdef CONFIG_HOTPLUG_CPU
4381 /*
4382 * The outgoing CPU has just passed through the dying-idle state, and we
4383 * are being invoked from the CPU that was IPIed to continue the offline
4384 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4385 */
4386 void rcutree_migrate_callbacks(int cpu)
4387 {
4388 unsigned long flags;
4389 struct rcu_data *my_rdp;
4390 struct rcu_node *my_rnp;
4391 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4392 bool needwake;
4393
4394 if (rcu_rdp_is_offloaded(rdp) ||
4395 rcu_segcblist_empty(&rdp->cblist))
4396 return; /* No callbacks to migrate. */
4397
4398 local_irq_save(flags);
4399 my_rdp = this_cpu_ptr(&rcu_data);
4400 my_rnp = my_rdp->mynode;
4401 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4402 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4403 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4404 /* Leverage recent GPs and set GP for new callbacks. */
4405 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4406 rcu_advance_cbs(my_rnp, my_rdp);
4407 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4408 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4409 rcu_segcblist_disable(&rdp->cblist);
4410 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4411 !rcu_segcblist_n_cbs(&my_rdp->cblist));
4412 if (rcu_rdp_is_offloaded(my_rdp)) {
4413 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4414 __call_rcu_nocb_wake(my_rdp, true, flags);
4415 } else {
4416 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4417 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4418 }
4419 if (needwake)
4420 rcu_gp_kthread_wake();
4421 lockdep_assert_irqs_enabled();
4422 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4423 !rcu_segcblist_empty(&rdp->cblist),
4424 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4425 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4426 rcu_segcblist_first_cb(&rdp->cblist));
4427 }
4428 #endif
4429
4430 /*
4431 * On non-huge systems, use expedited RCU grace periods to make suspend
4432 * and hibernation run faster.
4433 */
4434 static int rcu_pm_notify(struct notifier_block *self,
4435 unsigned long action, void *hcpu)
4436 {
4437 switch (action) {
4438 case PM_HIBERNATION_PREPARE:
4439 case PM_SUSPEND_PREPARE:
4440 rcu_expedite_gp();
4441 break;
4442 case PM_POST_HIBERNATION:
4443 case PM_POST_SUSPEND:
4444 rcu_unexpedite_gp();
4445 break;
4446 default:
4447 break;
4448 }
4449 return NOTIFY_OK;
4450 }
4451
4452 #ifdef CONFIG_RCU_EXP_KTHREAD
4453 struct kthread_worker *rcu_exp_gp_kworker;
4454 struct kthread_worker *rcu_exp_par_gp_kworker;
4455
4456 static void __init rcu_start_exp_gp_kworkers(void)
4457 {
4458 const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4459 const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4460 struct sched_param param = { .sched_priority = kthread_prio };
4461
4462 rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4463 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4464 pr_err("Failed to create %s!\n", gp_kworker_name);
4465 return;
4466 }
4467
4468 rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4469 if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4470 pr_err("Failed to create %s!\n", par_gp_kworker_name);
4471 kthread_destroy_worker(rcu_exp_gp_kworker);
4472 return;
4473 }
4474
4475 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4476 sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4477 			   &param);
4478 }
4479
4480 static inline void rcu_alloc_par_gp_wq(void)
4481 {
4482 }
4483 #else /* !CONFIG_RCU_EXP_KTHREAD */
4484 struct workqueue_struct *rcu_par_gp_wq;
4485
4486 static void __init rcu_start_exp_gp_kworkers(void)
4487 {
4488 }
4489
4490 static inline void rcu_alloc_par_gp_wq(void)
4491 {
4492 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4493 WARN_ON(!rcu_par_gp_wq);
4494 }
4495 #endif /* CONFIG_RCU_EXP_KTHREAD */
4496
4497 /*
4498 * Spawn the kthreads that handle RCU's grace periods.
4499 */
4500 static int __init rcu_spawn_gp_kthread(void)
4501 {
4502 unsigned long flags;
4503 int kthread_prio_in = kthread_prio;
4504 struct rcu_node *rnp;
4505 struct sched_param sp;
4506 struct task_struct *t;
4507
4508 /* Force priority into range. */
4509 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4510 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4511 kthread_prio = 2;
4512 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4513 kthread_prio = 1;
4514 else if (kthread_prio < 0)
4515 kthread_prio = 0;
4516 else if (kthread_prio > 99)
4517 kthread_prio = 99;
4518
4519 if (kthread_prio != kthread_prio_in)
4520 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4521 kthread_prio, kthread_prio_in);
4522
4523 rcu_scheduler_fully_active = 1;
4524 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4525 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4526 return 0;
4527 if (kthread_prio) {
4528 sp.sched_priority = kthread_prio;
4529 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4530 }
4531 rnp = rcu_get_root();
4532 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4533 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4534 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4535 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4536 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4537 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4538 wake_up_process(t);
4539 rcu_spawn_nocb_kthreads();
4540 rcu_spawn_boost_kthreads();
4541 rcu_spawn_core_kthreads();
4542 /* Create kthread worker for expedited GPs */
4543 rcu_start_exp_gp_kworkers();
4544 return 0;
4545 }
4546 early_initcall(rcu_spawn_gp_kthread);
4547
4548 /*
4549 * This function is invoked towards the end of the scheduler's
4550 * initialization process. Before this is called, the idle task might
4551 * contain synchronous grace-period primitives (during which time, this idle
4552 * task is booting the system, and such primitives are no-ops). After this
4553 * function is called, any synchronous grace-period primitives are run as
4554 * expedited, with the requesting task driving the grace period forward.
4555 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4556 * runtime RCU functionality.
4557 */
4558 void rcu_scheduler_starting(void)
4559 {
4560 WARN_ON(num_online_cpus() != 1);
4561 WARN_ON(nr_context_switches() > 0);
4562 rcu_test_sync_prims();
4563 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4564 rcu_test_sync_prims();
4565 }
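/*
 * At this point rcu_scheduler_active has advanced from
 * RCU_SCHEDULER_INACTIVE to RCU_SCHEDULER_INIT; the later switch to
 * RCU_SCHEDULER_RUNNING happens from rcu_set_runtime_mode(), as noted
 * in the comment above.
 */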
4566
4567 /*
4568 * Helper function for rcu_init() that initializes the rcu_state structure.
4569 */
4570 static void __init rcu_init_one(void)
4571 {
4572 static const char * const buf[] = RCU_NODE_NAME_INIT;
4573 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4574 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4575 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4576
4577 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4578 int cpustride = 1;
4579 int i;
4580 int j;
4581 struct rcu_node *rnp;
4582
4583 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4584
4585 /* Silence gcc 4.8 false positive about array index out of range. */
4586 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4587 panic("rcu_init_one: rcu_num_lvls out of range");
4588
4589 /* Initialize the level-tracking arrays. */
4590
4591 for (i = 1; i < rcu_num_lvls; i++)
4592 rcu_state.level[i] =
4593 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4594 rcu_init_levelspread(levelspread, num_rcu_lvl);
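/*
 * Worked example (illustrative only, assuming a 64-bit build with the
 * default RCU_FANOUT_LEAF=16 and RCU_FANOUT=64, and 96 CPUs):
 * rcu_init_geometry() will have chosen num_rcu_lvl = {1, 6}, so
 * level[0] points at the root node and level[1] at the first of the
 * six leaf nodes.  With the default (non-exact) balancing, levelspread
 * works out to {6, 16}: six leaves under the root, and at most sixteen
 * CPUs per leaf.
 */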
4595
4596 /* Initialize the elements themselves, starting from the leaves. */
4597
4598 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4599 cpustride *= levelspread[i];
4600 rnp = rcu_state.level[i];
4601 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4602 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4603 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4604 &rcu_node_class[i], buf[i]);
4605 raw_spin_lock_init(&rnp->fqslock);
4606 lockdep_set_class_and_name(&rnp->fqslock,
4607 &rcu_fqs_class[i], fqs[i]);
4608 rnp->gp_seq = rcu_state.gp_seq;
4609 rnp->gp_seq_needed = rcu_state.gp_seq;
4610 rnp->completedqs = rcu_state.gp_seq;
4611 rnp->qsmask = 0;
4612 rnp->qsmaskinit = 0;
4613 rnp->grplo = j * cpustride;
4614 rnp->grphi = (j + 1) * cpustride - 1;
4615 if (rnp->grphi >= nr_cpu_ids)
4616 rnp->grphi = nr_cpu_ids - 1;
4617 if (i == 0) {
4618 rnp->grpnum = 0;
4619 rnp->grpmask = 0;
4620 rnp->parent = NULL;
4621 } else {
4622 rnp->grpnum = j % levelspread[i - 1];
4623 rnp->grpmask = BIT(rnp->grpnum);
4624 rnp->parent = rcu_state.level[i - 1] +
4625 j / levelspread[i - 1];
4626 }
4627 rnp->level = i;
4628 INIT_LIST_HEAD(&rnp->blkd_tasks);
4629 rcu_init_one_nocb(rnp);
4630 init_waitqueue_head(&rnp->exp_wq[0]);
4631 init_waitqueue_head(&rnp->exp_wq[1]);
4632 init_waitqueue_head(&rnp->exp_wq[2]);
4633 init_waitqueue_head(&rnp->exp_wq[3]);
4634 spin_lock_init(&rnp->exp_lock);
4635 }
4636 }
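/*
 * Continuing the illustrative 96-CPU example above: leaf j then covers
 * CPUs grplo = 16*j through grphi = 16*j + 15 (capped at nr_cpu_ids - 1
 * when needed), each leaf's ->parent points back at the single root,
 * and its ->grpmask is BIT(j).
 */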
4637
4638 init_swait_queue_head(&rcu_state.gp_wq);
4639 init_swait_queue_head(&rcu_state.expedited_wq);
4640 rnp = rcu_first_leaf_node();
4641 for_each_possible_cpu(i) {
4642 while (i > rnp->grphi)
4643 rnp++;
4644 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4645 rcu_boot_init_percpu_data(i);
4646 }
4647 }
4648
4649 /*
4650 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4651 * replace the definitions in tree.h because those are needed to size
4652 * the ->node array in the rcu_state structure.
4653 */
4654 void rcu_init_geometry(void)
4655 {
4656 ulong d;
4657 int i;
4658 static unsigned long old_nr_cpu_ids;
4659 int rcu_capacity[RCU_NUM_LVLS];
4660 static bool initialized;
4661
4662 if (initialized) {
4663 /*
4664 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4665 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4666 */
4667 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4668 return;
4669 }
4670
4671 old_nr_cpu_ids = nr_cpu_ids;
4672 initialized = true;
4673
4674 /*
4675 * Initialize any unspecified boot parameters.
4676 * The default values of jiffies_till_first_fqs and
4677 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4678 * value, which is a function of HZ, plus one for each
4679 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4680 */
4681 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4682 if (jiffies_till_first_fqs == ULONG_MAX)
4683 jiffies_till_first_fqs = d;
4684 if (jiffies_till_next_fqs == ULONG_MAX)
4685 jiffies_till_next_fqs = d;
4686 adjust_jiffies_till_sched_qs();
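/*
 * For example, with HZ=1000 (so RCU_JIFFIES_TILL_FORCE_QS is 3) and
 * 256 possible CPUs, d works out to 3 + 256/256 = 4 jiffies, assuming
 * the usual definitions in tree.h.
 */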
4687
4688 /* If the compile-time values are accurate, just leave. */
4689 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4690 nr_cpu_ids == NR_CPUS)
4691 return;
4692 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4693 rcu_fanout_leaf, nr_cpu_ids);
4694
4695 /*
4696 * The boot-time rcu_fanout_leaf parameter must be at least two
4697 * and cannot exceed the number of bits in the rcu_node masks.
4698 * Complain and fall back to the compile-time values if this
4699 * limit is exceeded.
4700 */
4701 if (rcu_fanout_leaf < 2 ||
4702 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4703 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4704 WARN_ON(1);
4705 return;
4706 }
4707
4708 /*
4709 * Compute the number of nodes that can be handled by an rcu_node tree
4710 * with the given number of levels.
4711 */
4712 rcu_capacity[0] = rcu_fanout_leaf;
4713 for (i = 1; i < RCU_NUM_LVLS; i++)
4714 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
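/*
 * For instance, assuming the common 64-bit defaults of
 * RCU_FANOUT_LEAF=16 and RCU_FANOUT=64, the per-level capacities run
 * 16, 1024, 65536, ... (one entry per configured level), so a handful
 * of levels covers any realistic nr_cpu_ids.
 */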
4715
4716 /*
4717 * The tree must be able to accommodate the configured number of CPUs.
4718 * If this limit is exceeded, fall back to the compile-time values.
4719 */
4720 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4721 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4722 WARN_ON(1);
4723 return;
4724 }
4725
4726 /* Calculate the number of levels in the tree. */
4727 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4728 }
4729 rcu_num_lvls = i + 1;
4730
4731 /* Calculate the number of rcu_nodes at each level of the tree. */
4732 for (i = 0; i < rcu_num_lvls; i++) {
4733 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4734 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4735 }
4736
4737 /* Calculate the total number of rcu_node structures. */
4738 rcu_num_nodes = 0;
4739 for (i = 0; i < rcu_num_lvls; i++)
4740 rcu_num_nodes += num_rcu_lvl[i];
4741 }
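/*
 * For the illustrative 96-CPU case used in the rcu_init_one() comments
 * above, this yields rcu_num_lvls = 2, num_rcu_lvl = {1, 6}, and
 * rcu_num_nodes = 7 (one root plus six leaves).
 */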
4742
4743 /*
4744 * Dump out the structure of the rcu_node combining tree associated
4745 * with the rcu_state structure.
4746 */
4747 static void __init rcu_dump_rcu_node_tree(void)
4748 {
4749 int level = 0;
4750 struct rcu_node *rnp;
4751
4752 pr_info("rcu_node tree layout dump\n");
4753 pr_info(" ");
4754 rcu_for_each_node_breadth_first(rnp) {
4755 if (rnp->level != level) {
4756 pr_cont("\n");
4757 pr_info(" ");
4758 level = rnp->level;
4759 }
4760 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4761 }
4762 pr_cont("\n");
4763 }
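/*
 * With rcutree.dump_tree=1, the illustrative 96-CPU geometry above
 * would print roughly the following (exact values depend on the
 * configured fanout):
 *
 *   rcu_node tree layout dump
 *    0:95 ^0
 *    0:15 ^0 16:31 ^1 32:47 ^2 48:63 ^3 64:79 ^4 80:95 ^5
 */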
4764
4765 struct workqueue_struct *rcu_gp_wq;
4766
4767 static void __init kfree_rcu_batch_init(void)
4768 {
4769 int cpu;
4770 int i;
4771
4772 /* Clamp it to the [0:100] seconds interval. */
4773 if (rcu_delay_page_cache_fill_msec < 0 ||
4774 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4775
4776 rcu_delay_page_cache_fill_msec =
4777 clamp(rcu_delay_page_cache_fill_msec, 0,
4778 (int) (100 * MSEC_PER_SEC));
4779
4780 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4781 rcu_delay_page_cache_fill_msec);
4782 }
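/*
 * For example, booting with
 * rcutree.rcu_delay_page_cache_fill_msec=200000 would be reduced to
 * 100000 ms (100 seconds) by the clamp above.
 */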
4783
4784 for_each_possible_cpu(cpu) {
4785 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4786
4787 for (i = 0; i < KFREE_N_BATCHES; i++) {
4788 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4789 krcp->krw_arr[i].krcp = krcp;
4790 }
4791
4792 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4793 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4794 krcp->initialized = true;
4795 }
4796 if (register_shrinker(&kfree_rcu_shrinker))
4797 pr_err("Failed to register kfree_rcu() shrinker!\n");
4798 }
4799
4800 void __init rcu_init(void)
4801 {
4802 int cpu;
4803
4804 rcu_early_boot_tests();
4805
4806 kfree_rcu_batch_init();
4807 rcu_bootup_announce();
4808 rcu_init_geometry();
4809 rcu_init_one();
4810 if (dump_tree)
4811 rcu_dump_rcu_node_tree();
4812 if (use_softirq)
4813 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4814
4815 /*
4816 * We don't need protection against CPU-hotplug here because
4817 * this is called early in boot, before either interrupts
4818 * or the scheduler are operational.
4819 */
4820 pm_notifier(rcu_pm_notify, 0);
4821 for_each_online_cpu(cpu) {
4822 rcutree_prepare_cpu(cpu);
4823 rcu_cpu_starting(cpu);
4824 rcutree_online_cpu(cpu);
4825 }
4826
4827 /* Create workqueue for Tree SRCU and for expedited GPs. */
4828 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4829 WARN_ON(!rcu_gp_wq);
4830 rcu_alloc_par_gp_wq();
4831
4832 /* Fill in default value for rcutree.qovld boot parameter. */
4833 /* -After- the rcu_node ->lock fields are initialized! */
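/*
 * With the usual defaults (qhimark of 10000 and DEFAULT_RCU_QOVLD_MULT
 * of 2), leaving qovld unset therefore yields qovld_calc = 20000
 * callbacks.
 */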
4834 if (qovld < 0)
4835 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4836 else
4837 qovld_calc = qovld;
4838 }
4839
4840 #include "tree_stall.h"
4841 #include "tree_exp.h"
4842 #include "tree_nocb.h"
4843 #include "tree_plugin.h"
4844