// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>
#include <linux/jiffies.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (i.e.,
 * in the section between ct_idle_enter() and ct_idle_exit()), then
 * rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way we can report an extended
 * quiescent state to other CPUs that have started a grace period.  Otherwise
 * we would delay any grace period for as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
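
/*
 * A typical use is in a lockdep assertion guarding code that must run
 * with RCU-sched protection, for example (an illustrative sketch, not
 * taken from a particular caller):
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *			 "need RCU-sched read-side protection");
 */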
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the period of boot between when the
 * first task is spawned and when the rcu_set_runtime_mode()
 * core_initcall() is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_async_hurry_nesting = ATOMIC_INIT(1);
/*
 * Should call_rcu() callbacks be processed with urgency or are
 * they OK being executed with arbitrary delays?
 */
bool rcu_async_should_hurry(void)
{
	return !IS_ENABLED(CONFIG_RCU_LAZY) ||
	       atomic_read(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_should_hurry);

/**
 * rcu_async_hurry - Make future async RCU callbacks not lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a timely fashion.
 */
void rcu_async_hurry(void)
{
	if (IS_ENABLED(CONFIG_RCU_LAZY))
		atomic_inc(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_hurry);

/**
 * rcu_async_relax - Make future async RCU callbacks lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a lazy fashion.
 */
void rcu_async_relax(void)
{
	if (IS_ENABLED(CONFIG_RCU_LAZY))
		atomic_dec(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_relax);
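
/*
 * Illustrative pairing (a sketch with a hypothetical object and callback,
 * not a specific in-tree caller): code that temporarily needs callbacks
 * to be processed promptly brackets the region with these two functions:
 *
 *	rcu_async_hurry();
 *	call_rcu(&obj->rh, obj_free_cb);	// not subject to laziness
 *	...
 *	rcu_async_relax();
 */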

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
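
/*
 * Illustrative pairing (a sketch, not a specific in-tree caller): code
 * that temporarily needs fast grace periods brackets the region, relying
 * on the nesting counter above to restore the prior behavior:
 *
 *	rcu_expedite_gp();
 *	synchronize_rcu();	// behaves like synchronize_rcu_expedited()
 *	rcu_unexpedite_gp();
 */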

/*
 * Minimum time in milliseconds from the start of boot until RCU can
 * consider in-kernel boot as completed.  This can also be tuned at runtime
 * to end the boot earlier, by userspace init code writing the time in
 * milliseconds (even 0) to: /sys/module/rcupdate/parameters/rcu_boot_end_delay.
 * The sysfs node can also be used to extend the delay to be larger than
 * the default, assuming the marking of boot complete has not yet occurred.
 */
static int rcu_boot_end_delay = CONFIG_RCU_BOOT_END_DELAY;

static bool rcu_boot_ended __read_mostly;
static bool rcu_boot_end_called __read_mostly;
static DEFINE_MUTEX(rcu_boot_end_lock);

/*
 * Inform RCU of the end of the in-kernel boot sequence.  The boot sequence
 * will not be marked ended until at least rcu_boot_end_delay milliseconds
 * have passed.
 */
void rcu_end_inkernel_boot(void);
static void rcu_boot_end_work_fn(struct work_struct *work)
{
	rcu_end_inkernel_boot();
}
static DECLARE_DELAYED_WORK(rcu_boot_end_work, rcu_boot_end_work_fn);

/* Must be called with rcu_boot_end_lock held. */
static void rcu_end_inkernel_boot_locked(void)
{
	rcu_boot_end_called = true;

	if (rcu_boot_ended)
		return;

	if (rcu_boot_end_delay) {
		u64 boot_ms = div_u64(ktime_get_boot_fast_ns(), 1000000UL);

		if (boot_ms < rcu_boot_end_delay) {
			schedule_delayed_work(&rcu_boot_end_work,
					      msecs_to_jiffies(rcu_boot_end_delay - boot_ms));
			return;
		}
	}

	cancel_delayed_work(&rcu_boot_end_work);
	rcu_unexpedite_gp();
	rcu_async_relax();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

void rcu_end_inkernel_boot(void)
{
	mutex_lock(&rcu_boot_end_lock);
	rcu_end_inkernel_boot_locked();
	mutex_unlock(&rcu_boot_end_lock);
}

static int param_set_rcu_boot_end(const char *val, const struct kernel_param *kp)
{
	uint end_ms;
	int ret = kstrtouint(val, 0, &end_ms);

	if (ret)
		return ret;
	/*
	 * rcu_end_inkernel_boot() should be called at least once during init
	 * before we can allow param changes to end the boot.
	 */
	mutex_lock(&rcu_boot_end_lock);
	rcu_boot_end_delay = end_ms;
	if (!rcu_boot_ended && rcu_boot_end_called)
		rcu_end_inkernel_boot_locked();
	mutex_unlock(&rcu_boot_end_lock);
	return ret;
}

static const struct kernel_param_ops rcu_boot_end_ops = {
	.set = param_set_rcu_boot_end,
	.get = param_get_uint,
};
module_param_cb(rcu_boot_end_delay, &rcu_boot_end_ops, &rcu_boot_end_delay, 0644);
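
/*
 * For example, init code that considers boot complete can end the delay
 * immediately by writing zero to the node named above:
 *
 *	echo 0 > /sys/module/rcupdate/parameters/rcu_boot_end_delay
 */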

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
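
/*
 * A typical use is as the condition argument of rcu_dereference_check(),
 * for example (a sketch with a hypothetical structure and lock):
 *
 *	p = rcu_dereference_check(gp->ptr,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&gp->lock));
 */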

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
479
__wait_rcu_gp(bool checktiny,int n,call_rcu_func_t * crcu_array,struct rcu_synchronize * rs_array)480 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
481 struct rcu_synchronize *rs_array)
482 {
483 int i;
484 int j;
485
486 /* Initialize and register callbacks for each crcu_array element. */
487 for (i = 0; i < n; i++) {
488 if (checktiny &&
489 (crcu_array[i] == call_rcu)) {
490 might_sleep();
491 continue;
492 }
493 for (j = 0; j < i; j++)
494 if (crcu_array[j] == crcu_array[i])
495 break;
496 if (j == i) {
497 init_rcu_head_on_stack(&rs_array[i].head);
498 init_completion(&rs_array[i].completion);
499 (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
500 }
501 }
502
503 /* Wait for all callbacks to be invoked. */
504 for (i = 0; i < n; i++) {
505 if (checktiny &&
506 (crcu_array[i] == call_rcu))
507 continue;
508 for (j = 0; j < i; j++)
509 if (crcu_array[j] == crcu_array[i])
510 break;
511 if (j == i) {
512 wait_for_completion(&rs_array[i].completion);
513 destroy_rcu_head_on_stack(&rs_array[i].head);
514 }
515 }
516 }
517 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
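
/*
 * __wait_rcu_gp() is normally reached through the synchronize_rcu_mult()
 * wrapper in include/linux/rcupdate_wait.h, which waits on several RCU
 * flavors concurrently, for example (illustrative):
 *
 *	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
 */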

void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is going out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
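
/*
 * Illustrative on-stack lifecycle, a sketch along the lines of
 * __wait_rcu_gp() above:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */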

const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
module_param(rcu_exp_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

/**
 * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
 *
 * Returns a value that will always be treated by functions like
 * poll_state_synchronize_rcu() as a cookie whose grace period has already
 * completed.
 */
unsigned long get_completed_synchronize_rcu(void)
{
	return RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);
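
/*
 * For example (a sketch), a cookie can be pre-initialized with this
 * function so that the very first poll already reports a completed
 * grace period:
 *
 *	unsigned long cookie = get_completed_synchronize_rcu();
 *
 *	WARN_ON_ONCE(!poll_state_synchronize_rcu(cookie));	// never fires
 */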

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU)) {
		early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
		call_srcu(&early_srcu, &shead, test_callback);
	}
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
			WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */